예제 #1
0
    /// <summary>
    /// Allocates a flat queue backed by persistent native memory for keys and values.
    /// </summary>
    /// <param name="size">Capacity in key/value slots; must be non-negative.</param>
    /// <exception cref="System.ArgumentOutOfRangeException">Thrown when <paramref name="size"/> is negative.</exception>
    public FlatQueue(int size)
    {
        // Fail fast before touching native memory; Allocator.Persistent buffers
        // are not garbage-collected, so we must not allocate on bad input.
        if (size < 0)
        {
            throw new System.ArgumentOutOfRangeException(nameof(size));
        }

        // Persistent allocations outlive the current frame and must be released
        // explicitly — presumably in a Dispose elsewhere in this class; confirm.
        __k = new NativeArray <int>(size, Allocator.Persistent);
        __v = new NativeArray <Float>(size, Allocator.Persistent);

        // Cache raw pointers once so hot-path element access bypasses the
        // NativeArray indexer (and its safety checks).
        pk     = (int *)NativeArrayUnsafeUtility.GetUnsafePtr(__k);
        pv     = (Float *)NativeArrayUnsafeUtility.GetUnsafePtr(__v);
        length = 0;
    }
예제 #2
0
        /// <summary>
        /// Predict with data. Calls the official API of the XGBoost library.
        /// The function is protected against concurrent calls (see the lock below).
        /// </summary>
        /// <param name="data">Data as DMatrix</param>
        /// <param name="predictedValues">Results of the prediction; reallocated only when too small to hold the output.</param>
        /// <param name="outputMargin">Whether to output the raw untransformed margin value.</param>
        /// <param name="ntreeLimit">Limit number of trees in the prediction; defaults to 0 (use all trees).</param>
        public void PredictN(DMatrix data, ref VBuffer <Float> predictedValues, bool outputMargin = true, int ntreeLimit = 0)
        {
            // Bit 0x01 of the option mask asks XGBoost for the raw margin
            // instead of the transformed prediction.
            int optionMask = 0x00;

            if (outputMargin)
            {
                optionMask |= 0x01;
            }

            // REVIEW xadupre: see review in function PredictOneOff.

            ValidateFeatures(data);
            // length and ppreds are filled by the native call: number of
            // predictions and a pointer into XGBoost-owned memory.
            uint   length = 0;
            IntPtr ppreds = IntPtr.Zero;

            unsafe
            {
                // XGBoost uses OMP to parallelize the computation
                // of the output, each observation will be computed in a separate thread
                // and will use thread specific context.
                // Read https://blogs.msdn.microsoft.com/oldnewthing/20101122-00/?p=12233.
                // This function is called from multiple threads in C# for the evaluation with an iterator,
                // XGBoost parallelizes the computation for each evaluation (even if it is one in this case).
                // It chooses the number of thread with: nthread = omp_get_num_threads() (gbtree.cc)
                // The lock nullifies the parallelization done by Microsoft.ML.
                // There is no parallelization done by XGBoost on one observation.
                // Without the lock, the program fails (null pointer or something similar).
                // This item is a request: https://github.com/dmlc/xgboost/issues/1449.
                // As a consequence, this function is only used during training to evaluate the model on a batch of observations.
                // The reason is XGBoost is using caches in many places assuming XGBoost is called from one unique thread.
                // That explains this lock.
                // That function only relies on the official API of XGBoost.
                // NOTE(review): locking on `this` is generally discouraged (any external
                // code holding a reference can contend on the same monitor); switching to
                // a private gate object requires auditing every other lock site in this
                // class, so it is left unchanged here — TODO confirm and migrate together.
                lock (this)
                {
                    int t = WrappedXGBoostInterface.XGBoosterPredict(_handle, data.Handle,
                                                                     optionMask, (uint)ntreeLimit,
                                                                     ref length, ref ppreds);
                    WrappedXGBoostInterface.Check(t);
                }
                // ppreds points at memory owned by the native booster; it is read
                // outside the lock, presumably valid until the next native call on
                // this booster — TODO confirm against the XGBoost C API contract.
                Float *preds = (Float *)ppreds;
                Contracts.Assert(0 < length && length < Int32.MaxValue);
                // Grow the destination buffer only when needed. NOTE(review): when the
                // existing buffer is large enough, the data is copied in place and the
                // buffer's logical length is not updated here — callers appear expected
                // to use the returned count themselves; verify against call sites.
                if (length > (ulong)predictedValues.Length)
                {
                    predictedValues = new VBuffer <Float>((int)length, new Float[length]);
                }
                WrappedXGBoostInterface.Copy((IntPtr)preds, 0, predictedValues.Values, (int)length);
            }
        }