Example #1
        public async void UpdateModelStatus()
        {
            _applicationStatusManager.ChangeCurrentAppStatus(Enums.Status.Working, "Working | loading model...");
            // Get the latest version of the ML model for the current config
            try
            {
                Log.Information("Loading ml model.");
                Status = "Loading ml model...";
                var confDir    = Path.Join(Environment.GetFolderPath(Environment.SpecialFolder.LocalApplicationData), "lacmus");
                var configPath = Path.Join(confDir, "appConfig.json");
                _appConfig = await AppConfig.Create(configPath);

                var config = _appConfig.MlModelConfig;
                // Get the locally installed model versions
                var localVersions = await MLModel.GetInstalledVersions(config);

                if (localVersions.Contains(config.ModelVersion))
                {
                    Log.Information($"Find local version: {config.Image.Name}:{config.Image.Tag}.");
                }
                else
                {
                    IsShowLoadModelButton = true;
                    throw new Exception($"There are no ml local models to init: {config.Image.Name}:{config.Image.Tag}");
                }
                Repository = config.Image.Name;
                Version    = $"{config.ModelVersion}";
                Type       = $"{config.Type}";
                using (var model = new MLModel(config))
                    await model.Download();
                Status  = $"Ready";
                IsError = false;
                Log.Information("Successfully loads ml model.");
            }
            catch (Exception e)
            {
                Status  = $"Not ready.";
                IsError = true;
                Error   = $"Error: {e.Message}";
                Log.Error(e, "Unable to load model.");
            }
            _applicationStatusManager.ChangeCurrentAppStatus(Enums.Status.Ready, "");
        }
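
For orientation, the core pattern shared by both examples on this page can be reduced to the sketch below. It reuses only calls that appear in the examples (AppConfig.Create, MLModel.GetInstalledVersions, MLModel.Download); the helper name EnsureModelDownloadedAsync and its standalone form are illustrative assumptions, not part of the lacmus sources.

        // Minimal sketch (assumed helper, not taken from the lacmus sources):
        // load the saved app config, verify that the configured model version
        // is installed locally, then download/initialise it.
        public static async Task EnsureModelDownloadedAsync()
        {
            var confDir    = Path.Join(
                Environment.GetFolderPath(Environment.SpecialFolder.LocalApplicationData),
                "lacmus");
            var configPath = Path.Join(confDir, "appConfig.json");

            var appConfig = await AppConfig.Create(configPath);
            var config    = appConfig.MlModelConfig;

            // GetInstalledVersions and Download are used exactly as in the examples above.
            var localVersions = await MLModel.GetInstalledVersions(config);
            if (!localVersions.Contains(config.ModelVersion))
                throw new Exception(
                    $"There are no local ml models to init: {config.Image.Name}:{config.Image.Tag}");

            using (var model = new MLModel(config))
                await model.Download();
        }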
Example #2
        private async void LoadModel()
        {
            _applicationStatusManager.ChangeCurrentAppStatus(Enums.Status.Working, "Working | loading model...");
            // Get the latest version of the ML model for the current config
            try
            {
                _applicationStatusManager.ChangeCurrentAppStatus(Enums.Status.Working, "");
                ModelManagerWindow window = new ModelManagerWindow(_window.LocalizationContext, ref _appConfig, _applicationStatusManager, _window.ThemeManager);
                _appConfig = await window.ShowResult();

                var config = _appConfig.MlModelConfig;
                // Init the local model, or download it from the docker registry and init it
                var localVersions = await MLModel.GetInstalledVersions(config);

                if (localVersions.Contains(config.ModelVersion))
                {
                    Log.Information($"Find local version: {config.Image.Name}:{config.Image.Tag}.");
                }
                else
                {
                    IsShowLoadModelButton = true;
                    throw new Exception($"There are no ml local models to init: {config.Image.Name}:{config.Image.Tag}");
                }
                Repository = config.Image.Name;
                Version    = $"{config.ModelVersion}";
                Type       = $"{config.Type}";
                using (var model = new MLModel(config))
                    await model.Download();
                Status            = $"Ready";
                IsError           = false;
                _window.AppConfig = _appConfig;
            }
            catch (Exception e)
            {
                Status  = $"Not ready.";
                IsError = true;
                Error   = $"Error: {e.Message}";
                Log.Error(e, "Unable to load model.");
            }
            _applicationStatusManager.ChangeCurrentAppStatus(Enums.Status.Ready, "");
        }
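
        // Illustrative sketch (not from the lacmus sources): the dialog step of
        // LoadModel in isolation. The ModelManagerWindow constructor arguments
        // and the ShowResult() call are copied from the method above; the wrapper
        // method and its name are assumptions. It relies on the same fields
        // (_window, _appConfig, _applicationStatusManager) as the examples.
        private async Task<AppConfig> PickConfigViaModelManagerAsync()
        {
            var window = new ModelManagerWindow(
                _window.LocalizationContext,
                ref _appConfig, // passed by ref, exactly as in LoadModel above
                _applicationStatusManager,
                _window.ThemeManager);
            return await window.ShowResult();
        }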
        public async Task DownloadModel()
        {
            _applicationStatusManager.ChangeCurrentAppStatus(Enums.Status.Working, "Working | loading model...");
            try
            {
                if (SelectedAvailableModel == null)
                {
                    throw new Exception("No selected model.");
                }
                if (SelectedAvailableModel.Type == MLModelType.Gpu)
                {
                    if (!RuntimeInformation.IsOSPlatform(OSPlatform.Linux))
                    {
                        var msgbox = MessageBoxManager.GetMessageBoxStandardWindow(new MessageBoxStandardParams
                        {
                            ButtonDefinitions = ButtonEnum.Ok,
                            ContentTitle      = "OSError",
                            ContentMessage    = LocalizationContext.OsErrorMesageGPU,
                            Icon              = MessageBox.Avalonia.Enums.Icon.Error,
                            Style             = Style.None,
                            ShowInCenter      = true
                        });
                        var result = await msgbox.Show();

                        throw new Exception($"Incorrect OS for {SelectedAvailableModel.Type} inference type");
                    }

                    /*
                     * if (CudafyHost.GetDeviceCount(eGPUType.Emulator) == 0)
                     * {
                     *  var msgbox = MessageBoxManager.GetMessageBoxStandardWindow(new MessageBoxStandardParams
                     *  {
                     *      ButtonDefinitions = ButtonEnum.Ok,
                     *      ContentTitle = "CUDA Error",
                     *      ContentMessage = "No CUDA devises.",
                     *      Icon = MessageBox.Avalonia.Enums.Icon.Error,
                     *      Style = Style.None,
                     *      ShowInCenter = true
                     *  });
                     *  var result = await msgbox.Show();
                     *  throw new Exception($"No CUDA devices.");
                     * }
                     */
                }

                var config = new MLModelConfig();
                config.Image.Name   = SelectedAvailableModel.Name;
                config.Type         = SelectedAvailableModel.Type;
                config.ModelVersion = SelectedAvailableModel.Version;
                config.ApiVersion   = SelectedAvailableModel.ApiVersion;
                config.Image.Tag    = config.GetDockerTag();

                using (var model = new MLModel(config))
                    await model.Download();
            }
            catch (Exception e)
            {
                Log.Error(e, "Unable to download ml model.");
            }
            _applicationStatusManager.ChangeCurrentAppStatus(Enums.Status.Ready, "");
        }
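
As a side note, the GPU branch of DownloadModel gates GPU inference on Linux with the standard .NET RuntimeInformation API (System.Runtime.InteropServices). That guard can be isolated as sketched below; the GpuGuard class and method name are hypothetical, and only the IsOSPlatform check and the exception text come from the example above.

        // Hypothetical helper: only the RuntimeInformation.IsOSPlatform check and
        // the exception text are taken from DownloadModel above.
        public static class GpuGuard
        {
            public static void EnsureLinuxForGpuInference(MLModelType type)
            {
                if (type == MLModelType.Gpu &&
                    !RuntimeInformation.IsOSPlatform(OSPlatform.Linux))
                {
                    throw new Exception($"Incorrect OS for {type} inference type");
                }
            }
        }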