Example 1
        /// <summary>
        /// Initializes the Brain with the Model that it will use when selecting actions for
        /// the agents.
        /// </summary>
        /// <param name="model"> The Barracuda model to load </param>
        /// <param name="actionSpec"> Description of the actions for the Agent.</param>
        /// <param name="inferenceDevice"> Inference execution device. CPU is the fastest
        /// option for most of ML Agents models. </param>
        /// <param name="seed"> The seed that will be used to initialize the RandomNormal
        /// and Multinomial objects used when running inference.</param>
        /// <exception cref="UnityAgentsException">Throws an error when the model is null
        /// </exception>
        public ModelRunner(
            NNModel model,
            ActionSpec actionSpec,
            InferenceDevice inferenceDevice,
            int seed = 0)
        {
            Model barracudaModel;

            m_Model           = model;
            // Guard the dereference: the null-model case is handled just below.
            m_ModelName       = model != null ? model.name : null;
            m_InferenceDevice = inferenceDevice;
            m_TensorAllocator = new TensorCachingAllocator();
            if (model != null)
            {
#if BARRACUDA_VERBOSE
                m_Verbose = true;
#endif

                D.logEnabled = m_Verbose;

                barracudaModel = ModelLoader.Load(model);
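                // Map the requested InferenceDevice to the corresponding Barracuda
                // worker backend; Burst also serves as the fallback default.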
                WorkerFactory.Type executionDevice;
                switch (inferenceDevice)
                {
                case InferenceDevice.CPU:
                    executionDevice = WorkerFactory.Type.CSharp;
                    break;

                case InferenceDevice.GPU:
                    executionDevice = WorkerFactory.Type.ComputePrecompiled;
                    break;

                case InferenceDevice.Burst:
                    executionDevice = WorkerFactory.Type.CSharpBurst;
                    break;

                default:
                    executionDevice = WorkerFactory.Type.CSharpBurst;
                    break;
                }
                m_Engine = WorkerFactory.CreateWorker(executionDevice, barracudaModel, m_Verbose);
            }
            else
            {
                barracudaModel = null;
                m_Engine       = null;
            }

            // Note: if model was null, barracudaModel is null here and the calls
            // below will fail, so a valid model is effectively required.
            m_InferenceInputs = barracudaModel.GetInputTensors();
            m_OutputNames     = barracudaModel.GetOutputNames();
            m_TensorGenerator = new TensorGenerator(
                seed, m_TensorAllocator, m_Memories, barracudaModel);
            m_TensorApplier = new TensorApplier(
                actionSpec, seed, m_TensorAllocator, m_Memories, barracudaModel);
            m_InputsByName     = new Dictionary<string, Tensor>();
            m_InferenceOutputs = new List<TensorProxy>();
        }
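For orientation, a minimal calling sketch for this constructor follows. It is an
illustration under assumptions: ModelRunner is internal to the ML-Agents runtime
(game code would normally assign the model through a BehaviorParameters component
instead), and the namespaces and the ActionSpec.MakeContinuous helper are as in
recent ML-Agents releases.

        using Unity.Barracuda;
        using Unity.MLAgents.Actuators;
        using Unity.MLAgents.Inference;
        using Unity.MLAgents.Policies;

        static class ModelRunnerSketch
        {
            // Builds a runner for a model with two continuous actions.
            static ModelRunner Create(NNModel modelAsset)
            {
                var actionSpec = ActionSpec.MakeContinuous(2);
                // Burst is the same CPU backend the switch above falls back to.
                return new ModelRunner(modelAsset, actionSpec, InferenceDevice.Burst, seed: 42);
            }
        }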
Example 2
        /// <summary>
        /// Initializes the Brain with the Model that it will use when selecting actions for
        /// the agents.
        /// </summary>
        /// <param name="model"> The Barracuda model to load </param>
        /// <param name="brainParameters"> The parameters of the Brain used to generate the
        /// placeholder tensors </param>
        /// <param name="inferenceDevice"> Inference execution device. CPU is the fastest
        /// option for most of ML Agents models. </param>
        /// <param name="seed"> The seed that will be used to initialize the RandomNormal
        /// and Multinomial objects used when running inference.</param>
        /// <exception cref="UnityAgentsException">Throws an error when the model is null
        /// </exception>
        public ModelRunner(
            NNModel model,
            BrainParameters brainParameters,
            InferenceDevice inferenceDevice = InferenceDevice.CPU,
            int seed = 0)
        {
            Model barracudaModel;

            m_Model           = model;
            m_InferenceDevice = inferenceDevice;
            m_TensorAllocator = new TensorCachingAllocator();
            if (model != null)
            {
#if BARRACUDA_VERBOSE
                m_Verbose = true;
#endif

                D.logEnabled = m_Verbose;

                barracudaModel = ModelLoader.Load(model);
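                // This overload only distinguishes GPU (precompiled compute backend)
                // from everything else, which falls back to the C# CPU backend.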
                var executionDevice = inferenceDevice == InferenceDevice.GPU
                    ? WorkerFactory.Type.ComputePrecompiled
                    : WorkerFactory.Type.CSharp;
                m_Engine = WorkerFactory.CreateWorker(executionDevice, barracudaModel, m_Verbose);
            }
            else
            {
                barracudaModel = null;
                m_Engine       = null;
            }

            m_InferenceInputs = BarracudaModelParamLoader.GetInputTensors(barracudaModel);
            m_OutputNames     = BarracudaModelParamLoader.GetOutputNames(barracudaModel);
            m_TensorGenerator = new TensorGenerator(
                seed, m_TensorAllocator, m_Memories, barracudaModel);
            m_TensorApplier = new TensorApplier(
                brainParameters, seed, m_TensorAllocator, m_Memories, barracudaModel);
            m_InputsByName     = new Dictionary<string, Tensor>();
            m_InferenceOutputs = new List<TensorProxy>();
        }
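The second snippet in this example is a later revision of the same constructor.
Compared with the version above, it takes an ActionSpec instead of BrainParameters,
adds a Burst-backed CPU worker (WorkerFactory.Type.CSharpBurst) that also serves as
the default, validates the model with BarracudaModelParamLoader.CheckModelVersion
before creating a worker, and reads model inputs and outputs through the
GetInputTensors/GetOutputNames extension calls instead of the static loader helpers.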
        /// <summary>
        /// Initializes the Brain with the Model that it will use when selecting actions for
        /// the agents.
        /// </summary>
        /// <param name="model"> The Barracuda model to load </param>
        /// <param name="actionSpec"> Description of the actions for the Agent.</param>
        /// <param name="inferenceDevice"> Inference execution device. CPU is the fastest
        /// option for most of ML Agents models. </param>
        /// <param name="seed"> The seed that will be used to initialize the RandomNormal
        /// and Multinomial objects used when running inference.</param>
        /// <exception cref="UnityAgentsException">Throws an error when the model is null
        /// </exception>
        public ModelRunner(
            NNModel model,
            ActionSpec actionSpec,
            InferenceDevice inferenceDevice,
            int seed = 0)
        {
            Model barracudaModel;

            m_Model           = model;
            // Guard the dereference: the null-model case is handled just below.
            m_ModelName       = model != null ? model.name : null;
            m_InferenceDevice = inferenceDevice;
            m_TensorAllocator = new TensorCachingAllocator();
            if (model != null)
            {
#if BARRACUDA_VERBOSE
                m_Verbose = true;
#endif

                D.logEnabled = m_Verbose;

                barracudaModel = ModelLoader.Load(model);

                // Validate the serialized model's version against this runtime;
                // a hard failure aborts construction below.
                var failedCheck = BarracudaModelParamLoader.CheckModelVersion(
                    barracudaModel
                    );
                if (failedCheck != null)
                {
                    if (failedCheck.CheckType == BarracudaModelParamLoader.FailedCheck.CheckTypeEnum.Error)
                    {
                        throw new UnityAgentsException(failedCheck.Message);
                    }
                }

                // Map the requested InferenceDevice to the corresponding Barracuda
                // worker backend; Burst also serves as the default.
                WorkerFactory.Type executionDevice;
                switch (inferenceDevice)
                {
                case InferenceDevice.CPU:
                    executionDevice = WorkerFactory.Type.CSharp;
                    break;

                case InferenceDevice.GPU:
                    executionDevice = WorkerFactory.Type.ComputePrecompiled;
                    break;

                case InferenceDevice.Burst:
                    executionDevice = WorkerFactory.Type.CSharpBurst;
                    break;

                case InferenceDevice.Default:     // fallthrough
                default:
                    executionDevice = WorkerFactory.Type.CSharpBurst;
                    break;
                }
                m_Engine = WorkerFactory.CreateWorker(executionDevice, barracudaModel, m_Verbose);
            }
            else
            {
                barracudaModel = null;
                m_Engine       = null;
            }

            // Note: if model was null, barracudaModel is null here and the calls
            // below will fail, so a valid model is effectively required.
            m_InferenceInputs = barracudaModel.GetInputTensors();
            m_OutputNames     = barracudaModel.GetOutputNames();
            m_TensorGenerator = new TensorGenerator(
                seed, m_TensorAllocator, m_Memories, barracudaModel);
            m_TensorApplier = new TensorApplier(
                actionSpec, seed, m_TensorAllocator, m_Memories, barracudaModel);
            m_InputsByName     = new Dictionary<string, Tensor>();
            m_InferenceOutputs = new List<TensorProxy>();
        }
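Because the version check can abort construction, callers may want to handle the
documented exception. A hedged sketch (the helper name and the logging choice are
illustrative, not part of the ML-Agents API):

        using Unity.Barracuda;
        using Unity.MLAgents;
        using Unity.MLAgents.Actuators;
        using Unity.MLAgents.Inference;
        using Unity.MLAgents.Policies;
        using UnityEngine;

        static class ModelRunnerGuard
        {
            // Returns null instead of propagating a model-version failure.
            static ModelRunner TryCreate(NNModel modelAsset, ActionSpec actionSpec)
            {
                try
                {
                    return new ModelRunner(modelAsset, actionSpec, InferenceDevice.Default);
                }
                catch (UnityAgentsException ex)
                {
                    // CheckModelVersion reported a hard error, e.g. a model exported
                    // by an incompatible trainer version.
                    Debug.LogError($"Could not load '{modelAsset.name}': {ex.Message}");
                    return null;
                }
            }
        }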