Example #1
        public void MatMulPATest(int matTest, int srcTest, int dstTest, float[] expected)
        {
            AlignedArray mat = _testMatrices[matTest];
            AlignedArray src = _testSrcVectors[srcTest];
            AlignedArray dst = _testDstVectors[dstTest];

            int[] idx = _testIndexArray;

            CpuMathUtils.MatTimesSrc(false, false, mat, idx, src, 0, 0, (srcTest == 0) ? 4 : 9, dst, dst.Size);
            float[] actual = new float[dst.Size];
            dst.CopyTo(actual, 0, dst.Size);
            Assert.Equal(expected, actual, _matMulComparer);
        }
Example #2
        private void InitializeTrainingState(int fieldCount, int featureCount, FieldAwareFactorizationMachineModelParameters predictor, out float[] linearWeights,
                                             out AlignedArray latentWeightsAligned, out float[] linearAccumulatedSquaredGrads, out AlignedArray latentAccumulatedSquaredGradsAligned)
        {
            linearWeights                        = new float[featureCount];
            latentWeightsAligned                 = new AlignedArray(featureCount * fieldCount * _latentDimAligned, 16);
            linearAccumulatedSquaredGrads        = new float[featureCount];
            latentAccumulatedSquaredGradsAligned = new AlignedArray(featureCount * fieldCount * _latentDimAligned, 16);

            if (predictor == null)
            {
                var rng = _host.Rand;
                for (int j = 0; j < featureCount; j++)
                {
                    linearWeights[j] = 0;
                    linearAccumulatedSquaredGrads[j] = 1;
                    for (int f = 0; f < fieldCount; f++)
                    {
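                        // vBias marks the start of the latent vector v_{j, f} within the aligned buffer.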
                        int vBias = j * fieldCount * _latentDimAligned + f * _latentDimAligned;
                        for (int k = 0; k < _latentDimAligned; k++)
                        {
                            if (k < _latentDim)
                            {
                                latentWeightsAligned[vBias + k] = _radius * (float)rng.NextDouble();
                            }
                            else
                            {
                                latentWeightsAligned[vBias + k] = 0;
                            }
                            latentAccumulatedSquaredGradsAligned[vBias + k] = 1;
                        }
                    }
                }
            }
            else
            {
                predictor.CopyLinearWeightsTo(linearWeights);
                predictor.CopyLatentWeightsTo(latentWeightsAligned);
                for (int j = 0; j < featureCount; j++)
                {
                    linearAccumulatedSquaredGrads[j] = 1;
                    for (int f = 0; f < fieldCount; f++)
                    {
                        int vBias = j * fieldCount * _latentDimAligned + f * _latentDimAligned;
                        for (int k = 0; k < _latentDimAligned; k++)
                        {
                            latentAccumulatedSquaredGradsAligned[vBias + k] = 1;
                        }
                    }
                }
            }
        }
Example #3
        private ValueGetter <VBuffer <Float> > GetterFromVectorType(IRow input, int iinfo)
        {
            var getSrc = GetSrcGetter <VBuffer <Float> >(input, iinfo);
            var src    = default(VBuffer <Float>);

            var featuresAligned = new AlignedArray(RoundUp(Infos[iinfo].TypeSrc.ValueCount, _cfltAlign), CpuMathUtils.GetVectorAlignment());
            var productAligned  = new AlignedArray(RoundUp(_transformInfos[iinfo].NewDim, _cfltAlign), CpuMathUtils.GetVectorAlignment());

            return
                ((ref VBuffer <Float> dst) =>
            {
                getSrc(ref src);
                TransformFeatures(Host, ref src, ref dst, _transformInfos[iinfo], featuresAligned, productAligned);
            });
        }
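Note: RoundUp above pads a length up to the next multiple of _cfltAlign so that the AlignedArray buffers line up with CpuMathUtils.GetVectorAlignment(). A minimal sketch of such a round-up helper, shown only as an illustration under that assumption (it is not the library's own implementation):

        // Illustrative sketch: smallest multiple of align that is greater than or equal to count.
        // Assumes align is a positive power of two, as vector alignments are.
        private static int RoundUp(int count, int align)
        {
            return ((count + align - 1) / align) * align;
        }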
Example #4
        private static double CalculateAvgLoss(IChannel ch, RoleMappedData data, bool norm, float[] linearWeights, AlignedArray latentWeightsAligned,
                                               int latentDimAligned, AlignedArray latentSum, int[] featureFieldBuffer, int[] featureIndexBuffer, float[] featureValueBuffer, VBuffer <float> buffer, ref long badExampleCount)
        {
            var featureColumns    = data.Schema.GetColumns(RoleMappedSchema.ColumnRole.Feature);
            Func <int, bool> pred = c => featureColumns.Select(ci => ci.Index).Contains(c) || c == data.Schema.Label.Value.Index || c == data.Schema.Weight?.Index;
            var    getters        = new ValueGetter <VBuffer <float> > [featureColumns.Count];
            float  label          = 0;
            float  weight         = 1;
            double loss           = 0;
            float  modelResponse  = 0;
            long   exampleCount   = 0;

            badExampleCount = 0;
            int count = 0;

            using (var cursor = data.Data.GetRowCursor(pred))
            {
                var labelGetter  = RowCursorUtils.GetLabelGetter(cursor, data.Schema.Label.Value.Index);
                var weightGetter = data.Schema.Weight?.Index is int weightIdx ? cursor.GetGetter <float>(weightIdx) : null;

                for (int f = 0; f < featureColumns.Count; f++)
                {
                    getters[f] = cursor.GetGetter <VBuffer <float> >(featureColumns[f].Index);
                }
                while (cursor.MoveNext())
                {
                    labelGetter(ref label);
                    weightGetter?.Invoke(ref weight);
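                    // label - label + weight - weight is non-finite exactly when label or weight is NaN or infinite,
                    // so this single check filters out examples with a bad label or weight.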
                    float annihilation = label - label + weight - weight;
                    if (!FloatUtils.IsFinite(annihilation))
                    {
                        badExampleCount++;
                        continue;
                    }
                    if (!FieldAwareFactorizationMachineUtils.LoadOneExampleIntoBuffer(getters, buffer, norm, ref count,
                                                                                      featureFieldBuffer, featureIndexBuffer, featureValueBuffer))
                    {
                        badExampleCount++;
                        continue;
                    }
                    FieldAwareFactorizationMachineInterface.CalculateIntermediateVariables(featureColumns.Count, latentDimAligned, count,
                                                                                           featureFieldBuffer, featureIndexBuffer, featureValueBuffer, linearWeights, latentWeightsAligned, latentSum, ref modelResponse);
                    loss += weight * CalculateLoss(label, modelResponse);
                    exampleCount++;
                }
            }
            return(loss / exampleCount);
        }
Example #5
        private unsafe AlignedArray <T> Generate(Func <int, T>? pred = null, int length = SfmtPrimitive.MinArraySize64,
                                                 int alignment      = 16)
        {
            if (pred is null)
            {
                pred = ValueSetter;
            }

            var ret = new AlignedArray <T>(length, alignment);

            for (var i = 0; i < length; i++)
            {
                ret.StatusUncheckedPointer[i] = pred(i);
            }

            return(ret);
        }
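Example #6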
        internal FieldAwareFactorizationMachineModelParameters(IHostEnvironment env, bool norm, int fieldCount, int featureCount, int latentDim,
                                                               float[] linearWeights, AlignedArray latentWeightsAligned) : base(env, LoaderSignature)
        {
            Host.Assert(fieldCount > 0);
            Host.Assert(featureCount > 0);
            Host.Assert(latentDim > 0);
            Host.Assert(Utils.Size(linearWeights) == featureCount);
            LatentDimAligned = FieldAwareFactorizationMachineUtils.GetAlignedVectorLength(latentDim);
            Host.Assert(latentWeightsAligned.Size == checked (featureCount * fieldCount * LatentDimAligned));

            _norm                 = norm;
            FieldCount            = fieldCount;
            FeatureCount          = featureCount;
            LatentDimension       = latentDim;
            _linearWeights        = linearWeights;
            _latentWeightsAligned = latentWeightsAligned;
        }
Example #7
        private ValueGetter <VBuffer <Float> > GetterFromFloatType(IRow input, int iinfo)
        {
            var getSrc = GetSrcGetter <Float>(input, iinfo);
            var src    = default(Float);

            var featuresAligned = new AlignedArray(RoundUp(1, _cfltAlign), CpuMathUtils.GetVectorAlignment());
            var productAligned  = new AlignedArray(RoundUp(_transformInfos[iinfo].NewDim, _cfltAlign), CpuMathUtils.GetVectorAlignment());

            var oneDimensionalVector = new VBuffer <Float>(1, new Float[] { 0 });

            return
                ((ref VBuffer <Float> dst) =>
            {
                getSrc(ref src);
                oneDimensionalVector.Values[0] = src;
                TransformFeatures(Host, ref oneDimensionalVector, ref dst, _transformInfos[iinfo], featuresAligned, productAligned);
            });
        }
Example #8
        public static void FillArray32(SfmtPrimitiveState sfmt, AlignedArray <uint> array, int size)
        {
            if (sfmt.Index != N32)
            {
                throw new ArgumentException($"{nameof(sfmt)} internal state error.");
            }
            if (size % 4 != 0)
            {
                throw new ArgumentOutOfRangeException($"{nameof(size)} requires to be a multiple of four.");
            }

            if (size < N32)
            {
                throw new ArgumentOutOfRangeException($"{nameof(size)} requires to be at least {N32}");
            }

            GenRandArray(sfmt, (IntegerW128 *)array.StatusUncheckedPointer, size / 4);
        }
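Example #9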
        DataViewRow ISchemaBoundRowMapper.GetRow(DataViewRow input, IEnumerable <DataViewSchema.Column> activeColumns)
        {
            var latentSum          = new AlignedArray(_pred.FieldCount * _pred.FieldCount * _pred.LatentDimAligned, 16);
            var featureBuffer      = new VBuffer <float>();
            var featureFieldBuffer = new int[_pred.FeatureCount];
            var featureIndexBuffer = new int[_pred.FeatureCount];
            var featureValueBuffer = new float[_pred.FeatureCount];
            var inputGetters       = new ValueGetter <VBuffer <float> > [_pred.FieldCount];

            var activeIndices = activeColumns.Select(c => c.Index).ToArray();
            var active0       = activeIndices.Contains(0);
            var active1       = activeIndices.Contains(1);

            if (active0 || active1)
            {
                for (int f = 0; f < _pred.FieldCount; f++)
                {
                    inputGetters[f] = input.GetGetter <VBuffer <float> >(input.Schema[_inputColumnIndexes[f]]);
                }
            }

            var getters = new Delegate[2];

            if (active0)
            {
                ValueGetter <float> responseGetter = (ref float value) =>
                {
                    value = _pred.CalculateResponse(inputGetters, featureBuffer, featureFieldBuffer, featureIndexBuffer, featureValueBuffer, latentSum);
                };
                getters[0] = responseGetter;
            }
            if (active1)
            {
                ValueGetter <float> probGetter = (ref float value) =>
                {
                    value = _pred.CalculateResponse(inputGetters, featureBuffer, featureFieldBuffer, featureIndexBuffer, featureValueBuffer, latentSum);
                    value = MathUtils.SigmoidSlow(value);
                };
                getters[1] = probGetter;
            }

            return(new SimpleRow(OutputSchema, input, getters));
        }
Example #10
        public void Setup()
        {
            src       = new float[Length];
            dst       = new float[Length];
            src1      = new float[Length];
            src2      = new float[Length];
            original  = new float[Length];
            result    = new float[Length];
            idx       = new int[IndexLength];
            matrixIdx = new int[MatrixIndexLength];

            _seed = GetSeed();
            Random rand = new Random(_seed);

            for (int i = 0; i < Length; i++)
            {
                src[i]      = NextFloat(rand, ExponentRange);
                dst[i]      = NextFloat(rand, ExponentRange);
                original[i] = dst[i];
                result[i]   = dst[i];
                src1[i]     = NextFloat(rand, ExponentRange);
                src2[i]     = NextFloat(rand, ExponentRange);
            }

            for (int i = 0; i < IndexLength; i++)
            {
                idx[i] = rand.Next(0, Length);
            }

            for (int i = 0; i < MatrixIndexLength; i++)
            {
                matrixIdx[i] = rand.Next(0, 1000);
            }

            testMatrixAligned = new AlignedArray(matrixLength * matrixLength, align);
            testMatrixAligned.CopyFrom(src.AsSpan(0, (matrixLength - 1) * (matrixLength - 1)));

            testSrcVectorAligned = new AlignedArray(matrixLength, align);
            testSrcVectorAligned.CopyFrom(src1.AsSpan(0, matrixLength - 1)); // odd input

            testDstVectorAligned = new AlignedArray(matrixLength, align);
            testDstVectorAligned.CopyFrom(dst.AsSpan(0, matrixLength));
        }
Example #11
        public void SfmtFillArray64Test()
        {
            const int size = 1024;

            var sfmt = new SfmtPrimitiveState();

            var buffer = new AlignedArray <ulong>(size, 16);


            InitGenRand(sfmt, 1234);
            FillArray64(sfmt, buffer, size);

            {
                var expected = File.ReadLines("./Data/AfterStateFill64.txt").Select(x => uint.Parse(x)).ToArray();
                var actual   = new Span <uint>(sfmt.State, N32);
                actual.Length.Is(expected.Length);

                for (var i = 0; i < actual.Length; i++)
                {
                    actual[i].Is(expected[i]);
                }
            }

            {
                var actA = buffer.ToArray();
                FillArray64(sfmt, buffer, size);

                var actB = buffer.ToArray();

                var act = actA.Concat(actB).ToArray();

                var expected = File.ReadLines("./Data/Init1234Fill64.txt").Select(x => ulong.Parse(x)).ToArray();

                act.Length.Is(expected.Length);

                for (var i = 0; i < act.Length; i++)
                {
                    act[i].Is(expected[i]);
                }
            }
        }
Example #12
        public void SfmtFillArray32Test()
        {
            const int size = 1024;


            using var sfmt = new SfmtPrimitiveState();

            var buffer = new AlignedArray <uint>(size, 16);


            InitGenRand(sfmt, 1234);
            FillArray32(sfmt, buffer, size);

            var expected = File.ReadLines("./Data/AfterStateFill32.txt").Select(x => uint.Parse(x)).ToArray();

            var actual = new Span <uint>(sfmt.State, N32);

            actual.Length.Is(expected.Length);

            for (var i = 0; i < actual.Length; i++)
            {
                actual[i].Is(expected[i]);
            }


            var actualA = buffer.ToArray();

            FillArray32(sfmt, buffer, size);
            var actualB = buffer.ToArray();

            var actualM = actualA.Concat(actualB).ToArray();

            expected = File.ReadLines("./Data/init1234Fill32.txt").Select(x => uint.Parse(x)).ToArray();

            actualM.Length.Is(expected.Length);

            for (var i = 0; i < actualM.Length; i++)
            {
                actualM[i].Is(expected[i]);
            }
        }
Example #13
        public void SfmtInitArrayLongKey()
        {
            const int size = 1024;
            var       key  = Enumerable.Range(0, 32).Select(x => (uint)(x + 42)).ToArray();

            using var sfmt = new SfmtPrimitiveState();

            InitByArray(sfmt, key);

            var expected = File.ReadLines("./Data/LongKey.txt").Select(x => ulong.Parse(x)).ToArray();

            var p = new AlignedArray <ulong>(size, 16);


            FillArray64(sfmt, p, size);

            for (var i = 0; i < size; i++)
            {
                p[i].Is(expected[i]);
            }
        }
Example #14
        public static void CalculateIntermediateVariables(int fieldCount, int latentDim, int count, int[] fieldIndices, int[] featureIndices, float[] featureValues,
                                                          float[] linearWeights, AlignedArray latentWeights, AlignedArray latentSum, ref float response)
        {
            Contracts.AssertNonEmpty(fieldIndices);
            Contracts.AssertNonEmpty(featureValues);
            Contracts.AssertNonEmpty(featureIndices);
            Contracts.AssertNonEmpty(linearWeights);
            Contracts.Assert(Compat(latentWeights));
            Contracts.Assert(Compat(latentSum));

            unsafe
            {
                fixed(int *pf = &fieldIndices[0])
                fixed(int *pi   = &featureIndices[0])
                fixed(float *px = &featureValues[0])
                fixed(float *pw = &linearWeights[0])
                fixed(float *pv = &latentWeights.Items[0])
                fixed(float *pq = &latentSum.Items[0])
                fixed(float *pr = &response)
                CalculateIntermediateVariablesNative(fieldCount, latentDim, count, pf, pi, px, pw, Ptr(latentWeights, pv), Ptr(latentSum, pq), pr);
            }
        }
Example #15
        public Row GetRow(Row input, Func <int, bool> predicate)
        {
            var latentSum          = new AlignedArray(_pred.FieldCount * _pred.FieldCount * _pred.LatentDimAligned, 16);
            var featureBuffer      = new VBuffer <float>();
            var featureFieldBuffer = new int[_pred.FeatureCount];
            var featureIndexBuffer = new int[_pred.FeatureCount];
            var featureValueBuffer = new float[_pred.FeatureCount];
            var inputGetters       = new ValueGetter <VBuffer <float> > [_pred.FieldCount];

            if (predicate(0) || predicate(1))
            {
                for (int f = 0; f < _pred.FieldCount; f++)
                {
                    inputGetters[f] = input.GetGetter <VBuffer <float> >(_inputColumnIndexes[f]);
                }
            }

            var getters = new Delegate[2];

            if (predicate(0))
            {
                ValueGetter <float> responseGetter = (ref float value) =>
                {
                    value = _pred.CalculateResponse(inputGetters, featureBuffer, featureFieldBuffer, featureIndexBuffer, featureValueBuffer, latentSum);
                };
                getters[0] = responseGetter;
            }
            if (predicate(1))
            {
                ValueGetter <float> probGetter = (ref float value) =>
                {
                    value = _pred.CalculateResponse(inputGetters, featureBuffer, featureFieldBuffer, featureIndexBuffer, featureValueBuffer, latentSum);
                    value = MathUtils.SigmoidSlow(value);
                };
                getters[1] = probGetter;
            }

            return(new SimpleRow(OutputSchema, input, getters));
        }
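Example #16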
            public TransformInfo(IHost host, ApproximatedKernelMappingEstimator.ColumnOptions column, int d, float avgDist)
            {
                Contracts.AssertValue(host);

                SrcDim = d;
                NewDim = column.Rank;
                host.CheckUserArg(NewDim > 0, nameof(column.Rank));
                _useSin = column.UseCosAndSinBases;
                var seed = column.Seed;
                _rand = seed.HasValue ? RandomUtils.Create(seed) : RandomUtils.Create(host.Rand);
                _state = _rand.GetState();

                var generator = column.Generator;
                _matrixGenerator = generator.GetRandomNumberGenerator(avgDist);

                int roundedUpD = RoundUp(NewDim, _cfltAlign);
                int roundedUpNumFeatures = RoundUp(SrcDim, _cfltAlign);
                RndFourierVectors = new AlignedArray(roundedUpD * roundedUpNumFeatures, CpuMathUtils.GetVectorAlignment());
                RotationTerms = _useSin ? null : new AlignedArray(roundedUpD, CpuMathUtils.GetVectorAlignment());

                InitializeFourierCoefficients(roundedUpNumFeatures, roundedUpD);
            }
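Example #17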
        /// <summary>
        /// Initialize model parameters with a trained model.
        /// </summary>
        /// <param name="env">The host environment</param>
        /// <param name="norm">True if user wants to normalize feature vector to unit length.</param>
        /// <param name="fieldCount">The number of fileds, which is the symbol `m` in the doc: https://github.com/wschin/fast-ffm/blob/master/fast-ffm.pdf </param>
        /// <param name="featureCount">The number of features, which is the symbol `n` in the doc: https://github.com/wschin/fast-ffm/blob/master/fast-ffm.pdf </param>
        /// <param name="latentDim">The latent dimensions, which is the length of `v_{j, f}` in the doc: https://github.com/wschin/fast-ffm/blob/master/fast-ffm.pdf </param>
        /// <param name="linearWeights">The linear coefficients of the features, which is the symbol `w` in the doc: https://github.com/wschin/fast-ffm/blob/master/fast-ffm.pdf </param>
        /// <param name="latentWeights">Latent representation of each feature. Note that one feature may have <see cref="FieldCount"/> latent vectors
        /// and each latent vector contains <see cref="LatentDimension"/> values. In the f-th field, the j-th feature's latent vector, `v_{j, f}` in the doc
        /// https://github.com/wschin/fast-ffm/blob/master/fast-ffm.pdf, starts at latentWeights[j * fieldCount * latentDim + f * latentDim].
        /// The k-th element in v_{j, f} is latentWeights[j * fieldCount * latentDim + f * latentDim + k]. The size of the array must be featureCount x fieldCount x latentDim.</param>
        internal FieldAwareFactorizationMachineModelParameters(IHostEnvironment env, bool norm, int fieldCount, int featureCount, int latentDim,
                                                               float[] linearWeights, float[] latentWeights) : base(env, LoaderSignature)
        {
            Host.Assert(fieldCount > 0);
            Host.Assert(featureCount > 0);
            Host.Assert(latentDim > 0);
            Host.Assert(Utils.Size(linearWeights) == featureCount);
            LatentDimAligned = FieldAwareFactorizationMachineUtils.GetAlignedVectorLength(latentDim);
            Host.Assert(Utils.Size(latentWeights) == checked (featureCount * fieldCount * LatentDimAligned));

            _norm           = norm;
            FieldCount      = fieldCount;
            FeatureCount    = featureCount;
            LatentDimension = latentDim;
            _linearWeights  = linearWeights;

            _latentWeightsAligned = new AlignedArray(FeatureCount * FieldCount * LatentDimAligned, 16);

            for (int j = 0; j < FeatureCount; j++)
            {
                for (int f = 0; f < FieldCount; f++)
                {
                    int index        = j * FieldCount * LatentDimension + f * LatentDimension;
                    int indexAligned = j * FieldCount * LatentDimAligned + f * LatentDimAligned;
                    for (int k = 0; k < LatentDimAligned; k++)
                    {
                        if (k < LatentDimension)
                        {
                            _latentWeightsAligned[indexAligned + k] = latentWeights[index + k];
                        }
                        else
                        {
                            _latentWeightsAligned[indexAligned + k] = 0;
                        }
                    }
                }
            }
        }
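The indexing scheme described in the XML documentation above can be made concrete with a small illustrative helper (hypothetical code, not part of the model class): the latent vector v_{j, f} starts at j * fieldCount * latentDim + f * latentDim, and its k-th component sits k positions later.

        // Illustrative only: flat index of the k-th component of the latent vector v_{j, f},
        // following the layout documented for the latentWeights parameter above.
        static int LatentIndex(int j, int f, int k, int fieldCount, int latentDim)
            => j * fieldCount * latentDim + f * latentDim + k;

        // Example: with fieldCount = 3 and latentDim = 4, v_{2, 1} occupies indices 28..31,
        // because 2 * 3 * 4 + 1 * 4 = 28.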
Example #18
        public static void CalculateGradientAndUpdate(float lambdaLinear, float lambdaLatent, float learningRate, int fieldCount, int latentDim,
                                                      float weight, int count, int[] fieldIndices, int[] featureIndices, float[] featureValues, AlignedArray latentSum, float slope,
                                                      float[] linearWeights, AlignedArray latentWeights, float[] linearAccumulatedSquaredGrads, AlignedArray latentAccumulatedSquaredGrads)
        {
            Contracts.AssertNonEmpty(fieldIndices);
            Contracts.AssertNonEmpty(featureIndices);
            Contracts.AssertNonEmpty(featureValues);
            Contracts.Assert(Compat(latentSum));
            Contracts.AssertNonEmpty(linearWeights);
            Contracts.Assert(Compat(latentWeights));
            Contracts.AssertNonEmpty(linearAccumulatedSquaredGrads);
            Contracts.Assert(Compat(latentAccumulatedSquaredGrads));

            unsafe
            {
                fixed(int *pf = &fieldIndices[0])
                fixed(int *pi    = &featureIndices[0])
                fixed(float *px  = &featureValues[0])
                fixed(float *pq  = &latentSum.Items[0])
                fixed(float *pw  = &linearWeights[0])
                fixed(float *pv  = &latentWeights.Items[0])
                fixed(float *phw = &linearAccumulatedSquaredGrads[0])
                fixed(float *phv = &latentAccumulatedSquaredGrads.Items[0])
                {
                    if (Avx.IsSupported)
                    {
                        AvxIntrinsics.CalculateGradientAndUpdate(pf, pi, px, Ptr(latentSum, pq), pw, Ptr(latentWeights, pv),
                                                                 phw, Ptr(latentAccumulatedSquaredGrads, phv), lambdaLinear, lambdaLatent, learningRate, fieldCount, latentDim, weight, count, slope);
                    }
                    else
                    {
                        CalculateGradientAndUpdateNative(lambdaLinear, lambdaLatent, learningRate, fieldCount, latentDim, weight, count, pf, pi, px,
                                                         Ptr(latentSum, pq), slope, pw, Ptr(latentWeights, pv), phw, Ptr(latentAccumulatedSquaredGrads, phv));
                    }
                }
            }
        }
Example #19
            public TransformInfo(IHostEnvironment env, ModelLoadContext ctx, int colValueCount, string directoryName)
            {
                env.AssertValue(env);
                env.Assert(colValueCount > 0);

                // *** Binary format ***
                // int: d (number of untransformed features)
                // int: NewDim (number of transformed features)
                // bool: UseSin
                // uint[4]: the seeds for the pseudo random number generator.

                SrcDim = ctx.Reader.ReadInt32();
                env.CheckDecode(SrcDim == colValueCount);

                NewDim = ctx.Reader.ReadInt32();
                env.CheckDecode(NewDim > 0);

                _useSin = ctx.Reader.ReadBoolByte();

                var length = ctx.Reader.ReadInt32();

                env.CheckDecode(length == 4);
                _state = TauswortheHybrid.State.Load(ctx.Reader);
                _rand  = new TauswortheHybrid(_state);

                env.CheckDecode(ctx.Repository != null &&
                                ctx.LoadModelOrNull <IFourierDistributionSampler, SignatureLoadModel>(env, out _matrixGenerator, directoryName));

                // initialize the transform matrix
                int roundedUpD           = RoundUp(NewDim, _cfltAlign);
                int roundedUpNumFeatures = RoundUp(SrcDim, _cfltAlign);

                RndFourierVectors = new AlignedArray(roundedUpD * roundedUpNumFeatures, CpuMathUtils.GetVectorAlignment());
                RotationTerms     = _useSin ? null : new AlignedArray(roundedUpD, CpuMathUtils.GetVectorAlignment());
                InitializeFourierCoefficients(roundedUpNumFeatures, roundedUpD);
            }
Example #20
        public CpuMathUtilsUnitTests()
        {
            // Padded array whose length is a multiple of 4
            float[] testArray1 = new float[16] {
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f, 1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f
            };
            // Unpadded array whose length is not a multiple of 4.
            float[] testArray2 = new float[15] {
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f, 1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f
            };
            _testArrays     = new float[][] { testArray1, testArray2 };
            _testIndexArray = new int[9] {
                0, 2, 5, 6, 8, 11, 12, 13, 14
            };
            _comparer       = new FloatEqualityComparer();
            _matMulComparer = new FloatEqualityComparerForMatMul();

            // Padded matrices whose dimensions are multiples of 8
            float[] testMatrix1 = new float[8 * 8] {
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f
            };
            float[] testMatrix2 = new float[8 * 16];

            for (int i = 0; i < testMatrix2.Length; i++)
            {
                testMatrix2[i] = i + 1;
            }

            AlignedArray testMatrixAligned1 = new AlignedArray(8 * 8, _vectorAlignment);
            AlignedArray testMatrixAligned2 = new AlignedArray(8 * 16, _vectorAlignment);

            testMatrixAligned1.CopyFrom(testMatrix1, 0, testMatrix1.Length);
            testMatrixAligned2.CopyFrom(testMatrix2, 0, testMatrix2.Length);

            _testMatrices = new AlignedArray[] { testMatrixAligned1, testMatrixAligned2 };

            // Padded source vectors whose dimensions are multiples of 8
            float[] testSrcVector1 = new float[8] {
                1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f
            };
            float[] testSrcVector2 = new float[16] {
                1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, 10f, 11f, 12f, 13f, 14f, 15f, 16f
            };

            AlignedArray testSrcVectorAligned1 = new AlignedArray(8, _vectorAlignment);
            AlignedArray testSrcVectorAligned2 = new AlignedArray(16, _vectorAlignment);

            testSrcVectorAligned1.CopyFrom(testSrcVector1, 0, testSrcVector1.Length);
            testSrcVectorAligned2.CopyFrom(testSrcVector2, 0, testSrcVector2.Length);

            _testSrcVectors = new AlignedArray[] { testSrcVectorAligned1, testSrcVectorAligned2 };

            // Padded destination vectors whose dimensions are multiples of 8
            float[] testDstVector1 = new float[8] {
                0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f
            };
            float[] testDstVector2 = new float[16] {
                0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, 10f, 11f, 12f, 13f, 14f, 15f
            };

            AlignedArray testDstVectorAligned1 = new AlignedArray(8, _vectorAlignment);
            AlignedArray testDstVectorAligned2 = new AlignedArray(16, _vectorAlignment);

            testDstVectorAligned1.CopyFrom(testDstVector1, 0, testDstVector1.Length);
            testDstVectorAligned2.CopyFrom(testDstVector2, 0, testDstVector2.Length);

            _testDstVectors = new AlignedArray[] { testDstVectorAligned1, testDstVectorAligned2 };
        }
Example #21
        private FieldAwareFactorizationMachineModelParameters TrainCore(IChannel ch, IProgressChannel pch, RoleMappedData data,
                                                                        RoleMappedData validData = null, FieldAwareFactorizationMachineModelParameters predictor = null)
        {
            _host.AssertValue(ch);
            _host.AssertValue(pch);

            data.CheckBinaryLabel();
            var featureColumns    = data.Schema.GetColumns(RoleMappedSchema.ColumnRole.Feature);
            int fieldCount        = featureColumns.Count;
            int totalFeatureCount = 0;

            int[] fieldColumnIndexes = new int[fieldCount];
            for (int f = 0; f < fieldCount; f++)
            {
                var col = featureColumns[f];
                _host.Assert(!col.IsHidden);
                if (!(col.Type is VectorDataViewType vectorType) ||
                    !vectorType.IsKnownSize ||
                    vectorType.ItemType != NumberDataViewType.Single)
                {
                    throw ch.ExceptParam(nameof(data), "Training feature column '{0}' must be a known-size vector of Single, but has type: {1}.", col.Name, col.Type);
                }
                _host.Assert(vectorType.Size > 0);
                fieldColumnIndexes[f] = col.Index;
                totalFeatureCount    += vectorType.Size;
            }
            ch.Check(checked (totalFeatureCount * fieldCount * _latentDimAligned) <= Utils.ArrayMaxSize, "Latent dimension or the number of fields too large");
            if (predictor != null)
            {
                ch.Check(predictor.FeatureCount == totalFeatureCount, "Input model's feature count mismatches training feature count");
                ch.Check(predictor.LatentDimension == _latentDim, "Input model's latent dimension mismatches trainer's");
            }
            if (validData != null)
            {
                validData.CheckBinaryLabel();
                var validFeatureColumns = validData.Schema.GetColumns(RoleMappedSchema.ColumnRole.Feature);
                _host.Assert(fieldCount == validFeatureColumns.Count);
                for (int f = 0; f < fieldCount; f++)
                {
                    var featCol      = featureColumns[f];
                    var validFeatCol = validFeatureColumns[f];
                    _host.Assert(featCol.Name == validFeatCol.Name);
                    _host.Assert(featCol.Type == validFeatCol.Type);
                }
            }
            bool shuffle = _shuffle;

            if (shuffle && !data.Data.CanShuffle)
            {
                ch.Warning("Training data does not support shuffling, so ignoring request to shuffle");
                shuffle = false;
            }
            var rng                = shuffle ? _host.Rand : null;
            var featureGetters     = new ValueGetter <VBuffer <float> > [fieldCount];
            var featureBuffer      = new VBuffer <float>();
            var featureValueBuffer = new float[totalFeatureCount];
            var featureIndexBuffer = new int[totalFeatureCount];
            var featureFieldBuffer = new int[totalFeatureCount];
            var latentSum          = new AlignedArray(fieldCount * fieldCount * _latentDimAligned, 16);
            var metricNames        = new List <string>()
            {
                "Training-loss"
            };

            if (validData != null)
            {
                metricNames.Add("Validation-loss");
            }
            int    iter                 = 0;
            long   exampleCount         = 0;
            long   badExampleCount      = 0;
            long   validBadExampleCount = 0;
            double loss                 = 0;
            double validLoss            = 0;

            pch.SetHeader(new ProgressHeader(metricNames.ToArray(), new string[] { "iterations", "examples" }), entry =>
            {
                entry.SetProgress(0, iter, _numIterations);
                entry.SetProgress(1, exampleCount);
            });

            var columns = data.Schema.Schema.Where(x => fieldColumnIndexes.Contains(x.Index)).ToList();

            columns.Add(data.Schema.Label.Value);
            if (data.Schema.Weight != null)
            {
                columns.Add(data.Schema.Weight.Value);
            }

            InitializeTrainingState(fieldCount, totalFeatureCount, predictor, out float[] linearWeights,
                                    out AlignedArray latentWeightsAligned, out float[] linearAccSqGrads, out AlignedArray latentAccSqGradsAligned);

            // refer to Algorithm 3 in https://github.com/wschin/fast-ffm/blob/master/fast-ffm.pdf
            while (iter++ < _numIterations)
            {
                using (var cursor = data.Data.GetRowCursor(columns, rng))
                {
                    var labelGetter  = RowCursorUtils.GetLabelGetter(cursor, data.Schema.Label.Value.Index);
                    var weightGetter = data.Schema.Weight?.Index is int weightIdx ? RowCursorUtils.GetGetterAs <float>(NumberDataViewType.Single, cursor, weightIdx) : null;

                    for (int i = 0; i < fieldCount; i++)
                    {
                        featureGetters[i] = cursor.GetGetter <VBuffer <float> >(cursor.Schema[fieldColumnIndexes[i]]);
                    }
                    loss            = 0;
                    exampleCount    = 0;
                    badExampleCount = 0;
                    while (cursor.MoveNext())
                    {
                        float label         = 0;
                        float weight        = 1;
                        int   count         = 0;
                        float modelResponse = 0;
                        labelGetter(ref label);
                        weightGetter?.Invoke(ref weight);
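                        // Same non-finite trick as in CalculateAvgLoss: rejects examples whose label or weight is NaN or infinite.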
                        float annihilation = label - label + weight - weight;
                        if (!FloatUtils.IsFinite(annihilation))
                        {
                            badExampleCount++;
                            continue;
                        }
                        if (!FieldAwareFactorizationMachineUtils.LoadOneExampleIntoBuffer(featureGetters, featureBuffer, _norm, ref count,
                                                                                          featureFieldBuffer, featureIndexBuffer, featureValueBuffer))
                        {
                            badExampleCount++;
                            continue;
                        }

                        // refer to Algorithm 1 in [3] https://github.com/wschin/fast-ffm/blob/master/fast-ffm.pdf
                        FieldAwareFactorizationMachineInterface.CalculateIntermediateVariables(fieldCount, _latentDimAligned, count,
                                                                                               featureFieldBuffer, featureIndexBuffer, featureValueBuffer, linearWeights, latentWeightsAligned, latentSum, ref modelResponse);
                        var slope = CalculateLossSlope(label, modelResponse);

                        // refer to Algorithm 2 in [3] https://github.com/wschin/fast-ffm/blob/master/fast-ffm.pdf
                        FieldAwareFactorizationMachineInterface.CalculateGradientAndUpdate(_lambdaLinear, _lambdaLatent, _learningRate, fieldCount, _latentDimAligned, weight, count,
                                                                                           featureFieldBuffer, featureIndexBuffer, featureValueBuffer, latentSum, slope, linearWeights, latentWeightsAligned, linearAccSqGrads, latentAccSqGradsAligned);
                        loss += weight * CalculateLoss(label, modelResponse);
                        exampleCount++;
                    }
                    loss /= exampleCount;
                }

                if (_verbose)
                {
                    if (validData == null)
                    {
                        pch.Checkpoint(loss, iter, exampleCount);
                    }
                    else
                    {
                        validLoss = CalculateAvgLoss(ch, validData, _norm, linearWeights, latentWeightsAligned, _latentDimAligned, latentSum,
                                                     featureFieldBuffer, featureIndexBuffer, featureValueBuffer, featureBuffer, ref validBadExampleCount);
                        pch.Checkpoint(loss, validLoss, iter, exampleCount);
                    }
                }
            }
            if (badExampleCount != 0)
            {
                ch.Warning($"Skipped {badExampleCount} examples with bad label/weight/features in training set");
            }
            if (validBadExampleCount != 0)
            {
                ch.Warning($"Skipped {validBadExampleCount} examples with bad label/weight/features in validation set");
            }

            return(new FieldAwareFactorizationMachineModelParameters(_host, _norm, fieldCount, totalFeatureCount, _latentDim, linearWeights, latentWeightsAligned));
        }
Example #22
 public static void ZeroMatrixItems(AlignedArray dst, int ccol, int cfltRow, int[] indices) => SseUtils.ZeroMatrixItems(dst, ccol, cfltRow, indices);
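Example #23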
 private static bool Compat(AlignedArray a)
 {
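     // An AlignedArray is compatible when it is non-empty and was allocated with the expected byte alignment (CbAlign).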
     Contracts.AssertValue(a);
     Contracts.Assert(a.Size > 0);
     return(a.CbAlign == CbAlign);
 }
Example #24
        public CpuMathUtilsUnitTests()
        {
            // Padded array whose length is a multiple of 4
            float[] testArray1 = new float[8] {
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f
            };
            // Unpadded array whose length is not a multiple of 4.
            float[] testArray2 = new float[7] {
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f
            };
            testArrays     = new float[][] { testArray1, testArray2 };
            testIndexArray = new int[4] {
                0, 2, 5, 6
            };
            comparer = new FloatEqualityComparer();

            // Padded matrices whose dimensions are multiples of 4
            float[] testMatrix1 = new float[4 * 4] {
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f
            };
            float[] testMatrix2 = new float[4 * 8];

            for (int i = 0; i < testMatrix2.Length; i++)
            {
                testMatrix2[i] = i + 1;
            }

            AlignedArray testMatrixAligned1 = new AlignedArray(4 * 4, SseCbAlign);
            AlignedArray testMatrixAligned2 = new AlignedArray(4 * 8, SseCbAlign);

            testMatrixAligned1.CopyFrom(testMatrix1, 0, testMatrix1.Length);
            testMatrixAligned2.CopyFrom(testMatrix2, 0, testMatrix2.Length);

            testMatrices = new AlignedArray[] { testMatrixAligned1, testMatrixAligned2 };

            // Padded source vectors whose dimensions are multiples of 4
            float[] testSrcVector1 = new float[4] {
                1f, 2f, 3f, 4f
            };
            float[] testSrcVector2 = new float[8] {
                1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f
            };

            AlignedArray testSrcVectorAligned1 = new AlignedArray(4, SseCbAlign);
            AlignedArray testSrcVectorAligned2 = new AlignedArray(8, SseCbAlign);

            testSrcVectorAligned1.CopyFrom(testSrcVector1, 0, testSrcVector1.Length);
            testSrcVectorAligned2.CopyFrom(testSrcVector2, 0, testSrcVector2.Length);

            testSrcVectors = new AlignedArray[] { testSrcVectorAligned1, testSrcVectorAligned2 };

            // Padded destination vectors whose dimensions are multiples of 4
            float[] testDstVector1 = new float[4] {
                0f, 1f, 2f, 3f
            };
            float[] testDstVector2 = new float[8] {
                0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f
            };

            AlignedArray testDstVectorAligned1 = new AlignedArray(4, SseCbAlign);
            AlignedArray testDstVectorAligned2 = new AlignedArray(8, SseCbAlign);

            testDstVectorAligned1.CopyFrom(testDstVector1, 0, testDstVector1.Length);
            testDstVectorAligned2.CopyFrom(testDstVector2, 0, testDstVector2.Length);

            testDstVectors = new AlignedArray[] { testDstVectorAligned1, testDstVectorAligned2 };
        }
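Example #25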
 internal void CopyLatentWeightsTo(AlignedArray latentWeights)
 {
     Host.AssertValue(_latentWeightsAligned);
     Host.AssertValue(latentWeights);
     latentWeights.CopyFrom(_latentWeightsAligned);
 }
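Example #26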
        internal float CalculateResponse(ValueGetter <VBuffer <float> >[] getters, VBuffer <float> featureBuffer,
                                         int[] featureFieldBuffer, int[] featureIndexBuffer, float[] featureValueBuffer, AlignedArray latentSum)
        {
            int   count         = 0;
            float modelResponse = 0;

            FieldAwareFactorizationMachineUtils.LoadOneExampleIntoBuffer(getters, featureBuffer, _norm, ref count,
                                                                         featureFieldBuffer, featureIndexBuffer, featureValueBuffer);
            FieldAwareFactorizationMachineInterface.CalculateIntermediateVariables(FieldCount, LatentDimAligned, count,
                                                                                   featureFieldBuffer, featureIndexBuffer, featureValueBuffer, _linearWeights, _latentWeightsAligned, latentSum, ref modelResponse);
            return(modelResponse);
        }
Example #27
 public static void MatTimesSrc(bool tran, bool add, AlignedArray mat, AlignedArray src, AlignedArray dst, int crun) => SseUtils.MatTimesSrc(tran, add, mat, src, dst, crun);
Example #28
        static CpuMathUtilsUnitTests()
        {
            // Padded array whose length is a multiple of 4
            float[] testArray1 = new float[16] {
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f, 1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f
            };
            // Unpadded array whose length is not a multiple of 4.
            float[] testArray2 = new float[15] {
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f, 1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f
            };
            _testArrays     = new float[][] { testArray1, testArray2 };
            _testIndexArray = new int[9] {
                0, 2, 5, 6, 8, 11, 12, 13, 14
            };
            _comparer       = new FloatEqualityComparer();
            _matMulComparer = new FloatEqualityComparerForMatMul();

            // Padded matrices whose dimensions are multiples of 8
            float[] testMatrix1 = new float[8 * 8] {
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f
            };
            float[] testMatrix2 = new float[8 * 16];

            for (int i = 0; i < testMatrix2.Length; i++)
            {
                testMatrix2[i] = i + 1;
            }

            AlignedArray testMatrixAligned1 = new AlignedArray(8 * 8, _vectorAlignment);
            AlignedArray testMatrixAligned2 = new AlignedArray(8 * 16, _vectorAlignment);

            testMatrixAligned1.CopyFrom(testMatrix1);
            testMatrixAligned2.CopyFrom(testMatrix2);

            _testMatrices = new AlignedArray[] { testMatrixAligned1, testMatrixAligned2 };

            // Padded source vectors whose dimensions are multiples of 8
            float[] testSrcVector1 = new float[8] {
                1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f
            };
            float[] testSrcVector2 = new float[16] {
                1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, 10f, 11f, 12f, 13f, 14f, 15f, 16f
            };

            AlignedArray testSrcVectorAligned1 = new AlignedArray(8, _vectorAlignment);
            AlignedArray testSrcVectorAligned2 = new AlignedArray(16, _vectorAlignment);

            testSrcVectorAligned1.CopyFrom(testSrcVector1);
            testSrcVectorAligned2.CopyFrom(testSrcVector2);

            _testSrcVectors = new AlignedArray[] { testSrcVectorAligned1, testSrcVectorAligned2 };

            // Padded destination vectors whose dimensions are multiples of 8
            float[] testDstVector1 = new float[8] {
                0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f
            };
            float[] testDstVector2 = new float[16] {
                0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, 10f, 11f, 12f, 13f, 14f, 15f
            };

            AlignedArray testDstVectorAligned1 = new AlignedArray(8, _vectorAlignment);
            AlignedArray testDstVectorAligned2 = new AlignedArray(16, _vectorAlignment);

            testDstVectorAligned1.CopyFrom(testDstVector1);
            testDstVectorAligned2.CopyFrom(testDstVector2);

            _testDstVectors = new AlignedArray[] { testDstVectorAligned1, testDstVectorAligned2 };

#if NETCOREAPP3_0
            DisableAvxEnvironmentVariables = new Dictionary <string, string>()
            {
                { disableAvx, "0" }
            };

            DisableAvxAndSseEnvironmentVariables = new Dictionary <string, string>()
            {
                { disableAvx, "0" },
                { disableSse, "0" }
            };
#endif
        }
Example #29
        static CpuMathUtilsUnitTests()
        {
            // Padded array whose length is a multiple of 4
            float[] testArray1 = new float[32] {
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f, 1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, 10f, 11f, 12f, 13f, 14f, 15f, 16f
            };
            // Unpadded array whose length is not a multiple of 4.
            float[] testArray2 = new float[30] {
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f, 1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, 10f, 11f, 12f, 13f, 14f, 15f
            };
            // Small Input Size Array
            float[] testArray3 = new float[15] {
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f, 1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f
            };
            _testArrays     = new float[][] { testArray1, testArray2, testArray3 };
            _testIndexArray = new int[18] {
                0, 2, 5, 6, 8, 11, 12, 13, 14, 16, 18, 21, 22, 24, 26, 27, 28, 29
            };
            _comparer       = new FloatEqualityComparer();
            _matMulComparer = new FloatEqualityComparerForMatMul();

            // Padded matrices whose dimensions are multiples of 8
            float[] testMatrix1 = new float[8 * 8] {
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f,
                1.96f, -2.38f, -9.76f, 13.84f, -106.37f, -26.93f, 32.45f, 3.29f
            };
            float[] testMatrix2 = new float[8 * 16];

            for (int i = 0; i < testMatrix2.Length; i++)
            {
                testMatrix2[i] = i + 1;
            }

            AlignedArray testMatrixAligned1 = new AlignedArray(8 * 8, _vectorAlignment);
            AlignedArray testMatrixAligned2 = new AlignedArray(8 * 16, _vectorAlignment);

            testMatrixAligned1.CopyFrom(testMatrix1);
            testMatrixAligned2.CopyFrom(testMatrix2);

            _testMatrices = new AlignedArray[] { testMatrixAligned1, testMatrixAligned2 };

            // Padded source vectors whose dimensions are multiples of 8
            float[] testSrcVector1 = new float[8] {
                1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f
            };
            float[] testSrcVector2 = new float[16] {
                1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, 10f, 11f, 12f, 13f, 14f, 15f, 16f
            };

            AlignedArray testSrcVectorAligned1 = new AlignedArray(8, _vectorAlignment);
            AlignedArray testSrcVectorAligned2 = new AlignedArray(16, _vectorAlignment);

            testSrcVectorAligned1.CopyFrom(testSrcVector1);
            testSrcVectorAligned2.CopyFrom(testSrcVector2);

            _testSrcVectors = new AlignedArray[] { testSrcVectorAligned1, testSrcVectorAligned2 };

            // Padded destination vectors whose dimensions are multiples of 8
            float[] testDstVector1 = new float[8] {
                0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f
            };
            float[] testDstVector2 = new float[16] {
                0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, 10f, 11f, 12f, 13f, 14f, 15f
            };

            AlignedArray testDstVectorAligned1 = new AlignedArray(8, _vectorAlignment);
            AlignedArray testDstVectorAligned2 = new AlignedArray(16, _vectorAlignment);

            testDstVectorAligned1.CopyFrom(testDstVector1);
            testDstVectorAligned2.CopyFrom(testDstVector2);

            _testDstVectors = new AlignedArray[] { testDstVectorAligned1, testDstVectorAligned2 };

            if ((SkipAvxSse || IsNetCore) && !IsNetCore2OrOlder)
            {
                _disableAvxEnvironmentVariables = new Dictionary <string, string>()
                {
                    { _disableAvx, "0" }
                };

                _disableAvxAndSseEnvironmentVariables = new Dictionary <string, string>()
                {
                    { _disableAvx, "0" },
                    { _disableSse, "0" }
                };
            }
        }
Example #30
 public static void MatTimesSrc(bool tran, bool add, AlignedArray mat, int[] rgposSrc, AlignedArray srcValues,
                                int posMin, int iposMin, int iposLim, AlignedArray dst, int crun) => SseUtils.MatTimesSrc(tran, add, mat, rgposSrc, srcValues, posMin, iposMin, iposLim, dst, crun);