/// <summary>
/// Loads the word-index lookup table and the TensorFlow sentiment model from
/// <c>_modelPath</c>, prints the model's input/output schema, composes the
/// scoring pipeline, fits it on an empty data view to materialize the
/// transformer chain, and runs a sample sentiment prediction.
/// </summary>
internal static void Start()
{
    MLContext mlContext = new MLContext();

    // Lookup table that maps each word to the integer id the model was trained with.
    var wordLookup = mlContext.Data.LoadFromTextFile(
        Path.Combine(_modelPath, "imdb_word_index.csv"),
        columns: new[]
        {
            new TextLoader.Column("Words", DataKind.String, 0),
            new TextLoader.Column("Ids", DataKind.Int32, 1),
        },
        separatorChar: ','
    );

    // Resizes the variable-length id vector to FeatureLength elements:
    // Array.Resize pads with default values when shorter, truncates when longer.
    Action<VariableLength, FixedLength> resizeFeatures = (src, dst) =>
    {
        var ids = src.VariableLengthFeatures;
        Array.Resize(ref ids, FeatureLength);
        dst.Features = ids;
    };

    // Load the TensorFlow model and print its input/output schema so the
    // pipeline column names below can be checked against the graph.
    TensorFlowModel tensorFlowModel = mlContext.Model.LoadTensorFlowModel(_modelPath);
    DataViewSchema schema = tensorFlowModel.GetModelSchema();

    Console.WriteLine(" =============== TensorFlow Model Schema =============== ");
    var featuresType = (VectorDataViewType)schema["Features"].Type;
    Console.WriteLine($"Name: Features, Type: {featuresType.ItemType.RawType}, Size: ({featuresType.Dimensions[0]})");
    var predictionType = (VectorDataViewType)schema["Prediction/Softmax"].Type;
    Console.WriteLine($"Name: Prediction/Softmax, Type: {predictionType.ItemType.RawType}, Size: ({predictionType.Dimensions[0]})");

    // Tokenize the review text -> map words to integer ids via the lookup
    // table -> resize to the fixed length the model expects -> score with
    // TensorFlow -> copy the softmax output into the 'Prediction' column.
    IEstimator<ITransformer> pipeline =
        mlContext.Transforms.Text.TokenizeIntoWords("TokenizedWords", "ReviewText")
            .Append(mlContext.Transforms.Conversion.MapValue("VariableLengthFeatures", wordLookup,
                wordLookup.Schema["Words"], wordLookup.Schema["Ids"], "TokenizedWords"))
            .Append(mlContext.Transforms.CustomMapping(resizeFeatures, "Resize"))
            .Append(tensorFlowModel.ScoreTensorFlowModel("Prediction/Softmax", "Features"))
            .Append(mlContext.Transforms.CopyColumns("Prediction", "Prediction/Softmax"));

    // Fitting on an empty enumerable just builds the transformer chain;
    // no training data is needed for this pre-trained model.
    IDataView emptyData = mlContext.Data.LoadFromEnumerable(new List<MovieReview>());
    ITransformer model = pipeline.Fit(emptyData);

    PredictSentiment(mlContext, model);
}
/// <summary>
/// Builds the sentiment-scoring pipeline: tokenizes the review text into
/// words, maps each word to its integer id via the lookup table, resizes the
/// id vector to the fixed length the model requires, scores it with the
/// TensorFlow model, and copies the softmax output to the 'Prediction' column.
/// </summary>
/// <param name="mlContext">Shared ML.NET context.</param>
/// <param name="lookupMap">Word-to-id lookup table (columns 'Words' and 'Ids').</param>
/// <param name="ResizeFeaturesAction">Mapping that pads/truncates the id vector to fixed length.</param>
/// <param name="tensorFlowModel">Loaded TensorFlow model.</param>
/// <returns>The composed, unfitted estimator pipeline.</returns>
private static IEstimator<ITransformer> CreatePipeline(MLContext mlContext, IDataView lookupMap,
    Action<VariableLength, FixedLength> ResizeFeaturesAction, TensorFlowModel tensorFlowModel)
{
    // Each stage is named so the chain below reads as the data flow.
    var tokenize = mlContext.Transforms.Text.TokenizeIntoWords("TokenizedWords", "ReviewText");
    var mapWordsToIds = mlContext.Transforms.Conversion.MapValue("VariableLengthFeatures", lookupMap,
        lookupMap.Schema["Words"], lookupMap.Schema["Ids"], "TokenizedWords");
    var resizeToFixedLength = mlContext.Transforms.CustomMapping(ResizeFeaturesAction, "Resize");
    var scoreWithTensorFlow = tensorFlowModel.ScoreTensorFlowModel("Prediction/Softmax", "Features");
    var exposePrediction = mlContext.Transforms.CopyColumns("Prediction", "Prediction/Softmax");

    return tokenize
        .Append(mapWordsToIds)
        .Append(resizeToFixedLength)
        .Append(scoreWithTensorFlow)
        .Append(exposePrediction);
}
/// <summary>
/// Verifies that two TensorFlow frozen graphs can be loaded side by side in
/// a single MLContext and that each can be scored independently with its own
/// input/output node names.
/// </summary>
public void TestLoadMultipleModel()
{
    const string matmulModelPath = "model_matmul/frozen_saved_model.pb";
    const string cifarModelPath = "cifar_model/frozen_model.pb";

    MLContext context = new MLContext(seed: 1);

    TensorFlowModel matmulModel = context.Model.LoadTensorFlowModel(matmulModelPath);
    TensorFlowModel cifarModel = context.Model.LoadTensorFlowModel(cifarModelPath);

    // The matmul graph takes two inputs ('a', 'b') and produces 'c';
    // the CIFAR graph uses single 'Input'/'Output' nodes.
    matmulModel.ScoreTensorFlowModel(new[] { "c" }, new[] { "a", "b" });
    cifarModel.ScoreTensorFlowModel("Output", "Input");
}
/// <summary>
/// Entry point: loads the word-index lookup table and the TensorFlow
/// sentiment model, prints the model's input/output schema, assembles the
/// scoring pipeline, fits it on an empty data view, and runs a sample
/// sentiment prediction.
/// </summary>
static void Main(string[] args)
{
    MLContext mlContext = new MLContext();

    // Word -> integer-id lookup table used to encode review text.
    var lookupColumns = new[]
    {
        new TextLoader.Column("Words", DataKind.String, 0),
        new TextLoader.Column("Ids", DataKind.Int32, 1),
    };
    var lookupMap = mlContext.Data.LoadFromTextFile(
        Path.Combine(_modelPath, "imdb_word_index.csv"),
        columns: lookupColumns,
        separatorChar: ',');

    // Pad or truncate the variable-length id vector to the fixed size the
    // model expects (Array.Resize pads with default values, truncates otherwise).
    Action<VariableLength, FixedLength> resizeAction = (input, output) =>
    {
        var ids = input.VariableLengthFeatures;
        Array.Resize(ref ids, FeatureLength);
        output.Features = ids;
    };

    // Load the model and report its input/output schema.
    TensorFlowModel tensorFlowModel = mlContext.Model.LoadTensorFlowModel(_modelPath);
    DataViewSchema schema = tensorFlowModel.GetModelSchema();

    Console.WriteLine(" =============== TensorFlow Model Schema =============== ");
    var featuresType = (VectorDataViewType)schema["Features"].Type;
    Console.WriteLine($"Name: Features, Type: {featuresType.ItemType.RawType}, Size: ({featuresType.Dimensions[0]})");
    var predictionType = (VectorDataViewType)schema["Prediction/Softmax"].Type;
    Console.WriteLine($"Name: Prediction/Softmax, Type: {predictionType.ItemType.RawType}, Size: ({predictionType.Dimensions[0]})");

    // Tokenize the review, encode words as ids, resize to fixed length,
    // score with TensorFlow, and surface the softmax output as 'Prediction'.
    IEstimator<ITransformer> pipeline = mlContext.Transforms.Text
        .TokenizeIntoWords("TokenizedWords", "ReviewText")
        .Append(mlContext.Transforms.Conversion.MapValue("VariableLengthFeatures", lookupMap,
            lookupMap.Schema["Words"], lookupMap.Schema["Ids"], "TokenizedWords"))
        .Append(mlContext.Transforms.CustomMapping(resizeAction, "Resize"))
        .Append(tensorFlowModel.ScoreTensorFlowModel("Prediction/Softmax", "Features"))
        .Append(mlContext.Transforms.CopyColumns("Prediction", "Prediction/Softmax"));

    // Fit on an empty data view just to materialize the transformer chain.
    IDataView emptyData = mlContext.Data.LoadFromEnumerable(new List<MovieReview>());
    ITransformer model = pipeline.Fit(emptyData);

    PredictSentiment(mlContext, model);
}
/// <summary>
/// Entry point: loads the word-index lookup table and the TensorFlow
/// sentiment model, prints the model's input/output schema, composes the
/// scoring pipeline, fits it on an empty data view, and runs a sample
/// sentiment prediction.
/// </summary>
static void Main(string[] args)
{
    MLContext mlContext = new MLContext();

    // Lookup table mapping each word to the integer id used during training.
    // NOTE(review): assumes imdb_word_index.csv sits alongside the model
    // files in _modelPath — confirm the deployment layout.
    var lookupMap = mlContext.Data.LoadFromTextFile(Path.Combine(_modelPath, "imdb_word_index.csv"),
        columns: new[]
        {
            new TextLoader.Column("Words", DataKind.String, 0),
            new TextLoader.Column("Ids", DataKind.Int32, 1),
        },
        separatorChar: ','
    );

    // Resizes the variable-length id array to exactly FeatureLength elements
    // (Array.Resize pads with default values when shorter, truncates when longer).
    Action<VariableLength, FixedLength> ResizeFeaturesAction = (s, f) =>
    {
        var features = s.VariableLengthFeatures;
        Array.Resize(ref features, FeatureLength);
        f.Features = features;
    };

    // Load the TensorFlow model and print its input/output schema so the
    // column names used in the pipeline can be checked against the graph.
    TensorFlowModel tensorFlowModel = mlContext.Model.LoadTensorFlowModel(_modelPath);
    DataViewSchema schema = tensorFlowModel.GetModelSchema();
    Console.WriteLine(" =============== TensorFlow Model Schema =============== ");
    var featuresType = (VectorDataViewType)schema["Features"].Type;
    Console.WriteLine($"Name: Features, Type: {featuresType.ItemType.RawType}, Size: ({featuresType.Dimensions[0]})");
    var predictionType = (VectorDataViewType)schema["Prediction/Softmax"].Type;
    Console.WriteLine($"Name: Prediction/Softmax, Type: {predictionType.ItemType.RawType}, Size: ({predictionType.Dimensions[0]})");

    // Tokenize -> encode words as ids -> resize to fixed length ->
    // score with TensorFlow -> copy the softmax output to 'Prediction'.
    IEstimator<ITransformer> pipeline = mlContext.Transforms.Text.TokenizeIntoWords("TokenizedWords", "ReviewText")
        .Append(mlContext.Transforms.Conversion.MapValue("VariableLengthFeatures", lookupMap,
            lookupMap.Schema["Words"], lookupMap.Schema["Ids"], "TokenizedWords"))
        .Append(mlContext.Transforms.CustomMapping(ResizeFeaturesAction, "Resize"))
        .Append(tensorFlowModel.ScoreTensorFlowModel("Prediction/Softmax", "Features"))
        .Append(mlContext.Transforms.CopyColumns("Prediction", "Prediction/Softmax"));

    // Fitting on an empty enumerable materializes the transformer chain;
    // no training data is needed for this pre-trained model.
    IDataView dataView = mlContext.Data.LoadFromEnumerable(new List<MovieReview>());
    ITransformer model = pipeline.Fit(dataView);

    PredictSentiment(mlContext, model);
}
// </SnippetDeclareGlobalVariables>

/// <summary>
/// Entry point for the sentiment-analysis sample: loads the IMDB word-index
/// lookup table and the TensorFlow model, prints the model's input/output
/// schema, builds and fits the scoring pipeline, then predicts sentiment for
/// a sample review. The &lt;Snippet…&gt; comments delimit regions included by
/// the docs tooling and must be preserved.
/// </summary>
static void Main(string[] args)
{
    // Bug fix: the interpolated string previously contained a stray literal
    // '$' ("${_modelPath}"), printing "modelPath: $<path>" instead of the path.
    Console.WriteLine($"modelPath: {_modelPath}");

    // Create MLContext to be shared across the model creation workflow objects
    // <SnippetCreateMLContext>
    MLContext mlContext = new MLContext();
    // </SnippetCreateMLContext>

    // Dictionary to encode words as integers.
    // <SnippetCreateLookupMap>
    var lookupMap = mlContext.Data.LoadFromTextFile(Path.Combine(_modelPath, "imdb_word_index.csv"),
        columns: new[]
        {
            new TextLoader.Column("Words", DataKind.String, 0),
            new TextLoader.Column("Ids", DataKind.Int32, 1),
        },
        separatorChar: ','
    );
    // </SnippetCreateLookupMap>

    // The model expects the input feature vector to be a fixed length vector.
    // This action resizes the variable length array generated by the lookup map
    // to a fixed length vector. If there are less than 600 words in the sentence,
    // the remaining indices will be filled with zeros. If there are more than
    // 600 words in the sentence, then the array is truncated at 600.
    // <SnippetResizeFeatures>
    Action<VariableLength, FixedLength> ResizeFeaturesAction = (s, f) =>
    {
        var features = s.VariableLengthFeatures;
        Array.Resize(ref features, FeatureLength);
        f.Features = features;
    };
    // </SnippetResizeFeatures>

    // Load the TensorFlow model.
    // <SnippetLoadTensorFlowModel>
    TensorFlowModel tensorFlowModel = mlContext.Model.LoadTensorFlowModel(_modelPath);
    // </SnippetLoadTensorFlowModel>

    // Print the model's input/output schema so the pipeline column names
    // below can be checked against the TensorFlow graph node names.
    // <SnippetGetModelSchema>
    DataViewSchema schema = tensorFlowModel.GetModelSchema();
    Console.WriteLine(" =============== TensorFlow Model Schema =============== ");
    var featuresType = (VectorDataViewType)schema["Features"].Type;
    Console.WriteLine($"Name: Features, Type: {featuresType.ItemType.RawType}, Size: ({featuresType.Dimensions[0]})");
    var predictionType = (VectorDataViewType)schema["Prediction/Softmax"].Type;
    Console.WriteLine($"Name: Prediction/Softmax, Type: {predictionType.ItemType.RawType}, Size: ({predictionType.Dimensions[0]})");
    // </SnippetGetModelSchema>

    // <SnippetTokenizeIntoWords>
    IEstimator<ITransformer> pipeline =
        // Split the text into individual words
        mlContext.Transforms.Text.TokenizeIntoWords("TokenizedWords", "ReviewText")
        // </SnippetTokenizeIntoWords>
        // <SnippetMapValue>
        // Map each word to an integer value. The array of integer makes up the input features.
        .Append(mlContext.Transforms.Conversion.MapValue("VariableLengthFeatures", lookupMap,
            lookupMap.Schema["Words"], lookupMap.Schema["Ids"], "TokenizedWords"))
        // </SnippetMapValue>
        // <SnippetCustomMapping>
        // Resize variable length vector to fixed length vector.
        .Append(mlContext.Transforms.CustomMapping(ResizeFeaturesAction, "Resize"))
        // </SnippetCustomMapping>
        // <SnippetScoreTensorFlowModel>
        // Passes the data to TensorFlow for scoring
        .Append(tensorFlowModel.ScoreTensorFlowModel("Prediction/Softmax", "Features"))
        // </SnippetScoreTensorFlowModel>
        // <SnippetCopyColumns>
        // Retrieves the 'Prediction' from TensorFlow and copies to a column
        .Append(mlContext.Transforms.CopyColumns("Prediction", "Prediction/Softmax"));
    // </SnippetCopyColumns>

    // <SnippetCreateModel>
    // Create an executable model from the estimator pipeline
    IDataView dataView = mlContext.Data.LoadFromEnumerable(new List<MovieReview>());
    ITransformer model = pipeline.Fit(dataView);
    // </SnippetCreateModel>

    // <SnippetCallPredictSentiment>
    PredictSentiment(mlContext, model);
    // </SnippetCallPredictSentiment>
}