/// <summary>
/// Removes the currently selected installed ML model (its docker image) and,
/// if that model is the one currently loaded, resets the displayed model state.
/// Errors are logged and swallowed; the app status is always restored to Ready.
/// </summary>
public async Task RemoveModel()
        {
            _applicationStatusManager.ChangeCurrentAppStatus(Enums.Status.Working, "Working | remove model...");
            try
            {
                if (SelectedInstalledModel == null)
                {
                    // Specific exception type instead of bare Exception; handled by the catch below.
                    throw new InvalidOperationException("No selected model.");
                }

                // Build the docker image configuration identifying the model to remove.
                var config = new MLModelConfig();
                config.Image.Name   = SelectedInstalledModel.Name;
                config.Type         = SelectedInstalledModel.Type;
                config.ModelVersion = SelectedInstalledModel.Version;
                config.ApiVersion   = SelectedInstalledModel.ApiVersion;
                config.Image.Tag    = config.GetDockerTag();

                using (var model = new MLModel(config))
                    await model.Remove();

                // If the removed model is the one currently loaded, reset the displayed state.
                // All four comparisons now consistently read from SelectedInstalledModel
                // (the original compared Type against config.Type, which holds the same value).
                if (SelectedInstalledModel.Name == Repository &&
                    Version == $"{SelectedInstalledModel.Version}" &&
                    API_VERSION == SelectedInstalledModel.ApiVersion &&
                    Type == $"{SelectedInstalledModel.Type}")
                {
                    Repository = "None";
                    Type       = "None";
                    Version    = "None";
                    Status     = "Not ready";
                    await UpdateModelStatus();
                }
            }
            catch (Exception e)
            {
                // Best-effort operation: failures are logged, not propagated to the UI.
                Log.Error(e, "Unable to remove ml model.");
            }
            finally
            {
                // Always restore the Ready status, even if removal or logging failed.
                _applicationStatusManager.ChangeCurrentAppStatus(Enums.Status.Ready, "");
            }
        }
        public async Task DownloadModel()
        {
            _applicationStatusManager.ChangeCurrentAppStatus(Enums.Status.Working, "Working | loading model...");
            try
            {
                if (SelectedAvailableModel == null)
                {
                    throw new Exception("No selected model.");
                }
                if (SelectedAvailableModel.Type == MLModelType.Gpu)
                {
                    if (!RuntimeInformation.IsOSPlatform(OSPlatform.Linux))
                    {
                        var msgbox = MessageBoxManager.GetMessageBoxStandardWindow(new MessageBoxStandardParams
                        {
                            ButtonDefinitions = ButtonEnum.Ok,
                            ContentTitle      = "OSError",
                            ContentMessage    = LocalizationContext.OsErrorMesageGPU,
                            Icon         = MessageBox.Avalonia.Enums.Icon.Error,
                            Style        = Style.None,
                            ShowInCenter = true
                        });
                        var result = await msgbox.Show();

                        throw new Exception($"Incorrect OS for {SelectedAvailableModel.Type} inference type");
                    }

                    /*
                     * if (CudafyHost.GetDeviceCount(eGPUType.Emulator) == 0)
                     * {
                     *  var msgbox = MessageBoxManager.GetMessageBoxStandardWindow(new MessageBoxStandardParams
                     *  {
                     *      ButtonDefinitions = ButtonEnum.Ok,
                     *      ContentTitle = "CUDA Error",
                     *      ContentMessage = "No CUDA devises.",
                     *      Icon = MessageBox.Avalonia.Enums.Icon.Error,
                     *      Style = Style.None,
                     *      ShowInCenter = true
                     *  });
                     *  var result = await msgbox.Show();
                     *  throw new Exception($"No CUDA devises.");
                     * }
                     */
                }

                var config = new MLModelConfig();
                config.Image.Name   = SelectedAvailableModel.Name;
                config.Type         = SelectedAvailableModel.Type;
                config.ModelVersion = SelectedAvailableModel.Version;
                config.ApiVersion   = SelectedAvailableModel.ApiVersion;
                config.Image.Tag    = config.GetDockerTag();

                using (var model = new MLModel(config))
                    await model.Download();
            }
            catch (Exception e)
            {
                Log.Error(e, "Unable to download ml model.");
            }
            _applicationStatusManager.ChangeCurrentAppStatus(Enums.Status.Ready, "");
        }