internal static ITransformer CreateTransformer(IHostEnvironment env, Options options, IDataView input)
        {
            Contracts.CheckValue(env, nameof(env));
            var h = env.Register(RegistrationName);

            h.CheckValue(options, nameof(options));
            h.CheckValue(input, nameof(input));
            h.CheckUserArg(Utils.Size(options.Columns) > 0, nameof(options.Columns), "Columns must be specified");

            // For each input column of the WordHashBagTransform, a tokenize transform is applied,
            // followed by a WordHashVectorizeTransform.
            // Since WordHashBagTransform is a many-to-one column transform, each
            // WordHashBagTransform.Column may need multiple tokenize transform columns,
            // and NgramHashExtractorTransform may need to define an identical number of HashTransform.Columns.
            // The intermediate columns are dropped at the end using a ColumnSelectingTransformer.
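            // As an illustration (hypothetical column names): a mapping
            //   { Name: "Features", Source: { "Title", "Body" } }
            // tokenizes "Title" and "Body" into two temporary columns, hashes the n-grams
            // from both temporaries into the single output column "Features", and finally
            // drops the temporaries.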
            IDataView view = input;

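            // Generate temporary column names for the tokenized sources; these are chosen
            // so that they do not collide with names already present in the input schema.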
            var uniqueSourceNames = NgramExtractionUtils.GenerateUniqueSourceNames(h, options.Columns, view.Schema);

            Contracts.Assert(uniqueSourceNames.Length == options.Columns.Length);

            var tokenizeColumns = new List<WordTokenizingEstimator.ColumnOptions>();
            var extractorCols = new NgramHashExtractingTransformer.Column[options.Columns.Length];
            var colCount = options.Columns.Length;
            var tmpColNames = new List<string>();

            for (int iinfo = 0; iinfo < colCount; iinfo++)
            {
                var column = options.Columns[iinfo];
                int srcCount = column.Source.Length;
                var curTmpNames = new string[srcCount];
                Contracts.Assert(uniqueSourceNames[iinfo].Length == srcCount);
                for (int isrc = 0; isrc < srcCount; isrc++)
                {
                    curTmpNames[isrc] = uniqueSourceNames[iinfo][isrc];
                    tokenizeColumns.Add(new WordTokenizingEstimator.ColumnOptions(curTmpNames[isrc], column.Source[isrc]));
                }

                tmpColNames.AddRange(curTmpNames);
                extractorCols[iinfo] = new NgramHashExtractingTransformer.Column
                {
                    Name = column.Name,
                    Source = curTmpNames,
                    NumberOfBits = column.NumberOfBits,
                    NgramLength = column.NgramLength,
                    Seed = column.Seed,
                    SkipLength = column.SkipLength,
                    Ordered = column.Ordered,
                    MaximumNumberOfInverts = column.MaximumNumberOfInverts,
                    FriendlyNames = column.Source,
                    UseAllLengths = column.UseAllLengths
                };
            }

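            // Step 1: fit the word tokenizer that produces the temporary tokenized columns.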
            ITransformer t1 = new WordTokenizingEstimator(env, tokenizeColumns.ToArray()).Fit(view);

            var featurizeArgs = new NgramHashExtractingTransformer.Options
            {
                UseAllLengths = options.UseAllLengths,
                NumberOfBits = options.NumberOfBits,
                NgramLength = options.NgramLength,
                SkipLength = options.SkipLength,
                Ordered = options.Ordered,
                Seed = options.Seed,
                Columns = extractorCols,
                MaximumNumberOfInverts = options.MaximumNumberOfInverts
            };

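            // Step 2: run the tokenizer, then build the hashing n-gram extractor on its output.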
            view = t1.Transform(view);
            ITransformer t2 = NgramHashExtractingTransformer.Create(h, featurizeArgs, view);

            // Since we added columns with new names, we need to explicitly drop them before returning the transformer chain.
            ITransformer t3 = new ColumnSelectingTransformer(env, null, tmpColNames.ToArray());

            return new TransformerChain<ITransformer>(new[] { t1, t2, t3 });
        }
        internal static ITransformer CreateTransformer(IHostEnvironment env, Options options, IDataView input)
        {
            Contracts.CheckValue(env, nameof(env));
            var h = env.Register(RegistrationName);

            h.CheckValue(options, nameof(options));
            h.CheckValue(input, nameof(input));
            h.CheckUserArg(Utils.Size(options.Columns) > 0, nameof(options.Columns), "Columns must be specified");

            // Compose the WordBagTransform from a tokenize transform,
            // followed by an NgramExtractionTransform.
            // Since WordBagTransform is a many-to-one column transform, for each
            // WordBagTransform.Column with multiple sources, we first apply a ConcatTransform.
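            // As an illustration (hypothetical column names): a mapping
            //   { Name: "Features", Source: { "Title", "Body" } }
            // is first concatenated into a single column named "Features", which is then
            // tokenized in place and n-gram extracted into the final "Features" output.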

            // REVIEW: In order to not get n-grams that cross between vector slots, we need to
            // enable tokenize transforms to insert a special token between slots.

            // REVIEW: In order to make it possible to output separate bags for different columns
            // using the same dictionary, we need to find a way to make ConcatTransform remember the boundaries.

            var tokenizeColumns = new WordTokenizingEstimator.ColumnOptions[options.Columns.Length];

            var extractorArgs = new NgramExtractorTransform.Options
            {
                MaxNumTerms = options.MaxNumTerms,
                NgramLength = options.NgramLength,
                SkipLength = options.SkipLength,
                UseAllLengths = options.UseAllLengths,
                Weighting = options.Weighting,
                Columns = new NgramExtractorTransform.Column[options.Columns.Length]
            };

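            // Validate each column mapping and fill in the per-column tokenize and extractor options.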
            for (int iinfo = 0; iinfo < options.Columns.Length; iinfo++)
            {
                var column = options.Columns[iinfo];
                h.CheckUserArg(!string.IsNullOrWhiteSpace(column.Name), nameof(column.Name));
                h.CheckUserArg(Utils.Size(column.Source) > 0, nameof(column.Source));
                h.CheckUserArg(column.Source.All(src => !string.IsNullOrWhiteSpace(src)), nameof(column.Source));

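                // For multi-source columns, the concat transform (t0 below) merges the sources
                // into a column named column.Name before tokenization, so tokenize that column;
                // otherwise tokenize the single source directly.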
                tokenizeColumns[iinfo] = new WordTokenizingEstimator.ColumnOptions(column.Name, column.Source.Length > 1 ? column.Name : column.Source[0]);

                extractorArgs.Columns[iinfo] = new NgramExtractorTransform.Column
                {
                    Name = column.Name,
                    Source = column.Name,
                    MaxNumTerms = column.MaxNumTerms,
                    NgramLength = column.NgramLength,
                    SkipLength = column.SkipLength,
                    Weighting = column.Weighting,
                    UseAllLengths = column.UseAllLengths
                };
            }

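            // Chain the three steps: concatenate multi-source columns, tokenize, then extract n-grams.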
            IDataView view = input;
            ITransformer t0 = NgramExtractionUtils.ApplyConcatOnSources(h, options.Columns);

            view = t0.Transform(view);
            ITransformer t1 = new WordTokenizingEstimator(env, tokenizeColumns).Fit(view);

            view = t1.Transform(view);
            ITransformer t2 = NgramExtractorTransform.Create(h, extractorArgs, view);

            return new TransformerChain<ITransformer>(new[] { t0, t1, t2 });
        }