/// <inheritdoc />
protected override void DecideAction()
{
    // A brain controlled by an external process makes no local decision;
    // just drop the pending requests.
    if (_isControlled)
    {
        agentInfos.Clear();
        return;
    }
    var currentBatchSize = agentInfos.Count();
    if (currentBatchSize == 0)
    {
        return;
    }
#if ENABLE_TENSORFLOW
    if (_engine == null)
    {
        // NOTE(review): this returns WITHOUT clearing agentInfos, so the same
        // batch (and this error) repeats every decision step — confirm intended.
        Debug.LogError($"No model was present for the Brain {name}.");
        return;
    }
    // Prepare the input tensors to be fed into the engine.
    _tensorGenerator.GenerateTensors(_inferenceInputs, currentBatchSize, agentInfos);
    // Prepare the output tensors the engine will write into.
    _tensorGenerator.GenerateTensors(_inferenceOutputs, currentBatchSize, agentInfos);
    // Execute the model.
    Profiler.BeginSample($"MLAgents.{name}.ExecuteGraph");
    _engine.ExecuteGraph(_inferenceInputs, _inferenceOutputs);
    Profiler.EndSample();
    // Apply the model outputs back onto the agents.
    _tensorApplier.ApplyTensors(_inferenceOutputs, agentInfos);
#else
    // currentBatchSize > 0 is guaranteed by the early return above, so the
    // former `if (agentInfos.Count > 0)` guard was always true and is removed.
    // Interpolation replaces string.Format for consistency with the other
    // messages in this method; the emitted text is unchanged.
    Debug.LogError(
        $"The brain {name} was set to inference mode but the Tensorflow library is not " +
        "present in the Unity project.");
#endif
    agentInfos.Clear();
}
/// <inheritdoc />
protected override void DecideAction()
{
    // A brain controlled by an external process makes no local decision;
    // just drop the pending requests.
    if (_isControlled)
    {
        agentInfos.Clear();
        return;
    }
    var currentBatchSize = agentInfos.Count();
    if (currentBatchSize == 0)
    {
        return;
    }
    // BUG FIX: the null-model check used to sit AFTER BeginSample (inside both
    // preprocessor branches) and returned early without a matching EndSample,
    // leaving Unity's profiler sample stack unbalanced. Checking before
    // BeginSample keeps every sample paired on all paths; no code ran between
    // the old BeginSample and the check, so behavior is otherwise identical.
    if (_engine == null)
    {
        // NOTE(review): this returns WITHOUT clearing agentInfos, so the same
        // batch (and this error) repeats every decision step — confirm intended.
        Debug.LogError($"No model was present for the Brain {name}.");
        return;
    }
    Profiler.BeginSample("LearningBrain.DecideAction");
#if ENABLE_TENSORFLOW
    // Prepare the input tensors to be fed into the engine.
    _tensorGenerator.GenerateTensors(_inferenceInputs, currentBatchSize, agentInfos);
    // Prepare the output tensors the engine will write into.
    _tensorGenerator.GenerateTensors(_inferenceOutputs, currentBatchSize, agentInfos);
    // Execute the model.
    Profiler.BeginSample($"MLAgents.{name}.ExecuteGraph");
    _engine.ExecuteGraph(_inferenceInputs, _inferenceOutputs);
    Profiler.EndSample();
    // Apply the model outputs back onto the agents.
    _tensorApplier.ApplyTensors(_inferenceOutputs, agentInfos);
#else
    // Barracuda path: each stage gets its own nested profiler sample.
    Profiler.BeginSample($"MLAgents.{name}.GenerateTensors");
    // Prepare the input tensors to be fed into the engine.
    _tensorGenerator.GenerateTensors(_inferenceInputs, currentBatchSize, agentInfos);
    Profiler.EndSample();

    Profiler.BeginSample($"MLAgents.{name}.PrepareBarracudaInputs");
    var inputs = PrepareBarracudaInputs(_inferenceInputs);
    Profiler.EndSample();

    // Execute the model.
    Profiler.BeginSample($"MLAgents.{name}.ExecuteGraph");
    _engine.Execute(inputs);
    Profiler.EndSample();

    Profiler.BeginSample($"MLAgents.{name}.FetchBarracudaOutputs");
    _inferenceOutputs = FetchBarracudaOutputs(_outputNames);
    Profiler.EndSample();

    Profiler.BeginSample($"MLAgents.{name}.ApplyTensors");
    // Apply the model outputs back onto the agents.
    _tensorApplier.ApplyTensors(_inferenceOutputs, agentInfos);
    Profiler.EndSample();
#endif
    agentInfos.Clear();
    Profiler.EndSample();
}