/// <summary>
/// Remembers the BrainParameters for a behavior so they can be sent to the
/// trainer later. Parameters that were already sent are ignored.
/// </summary>
void CacheBrainParameters(string behaviorName, BrainParameters brainParameters)
{
    if (!m_SentBrainKeys.Contains(behaviorName))
    {
        // TODO We should check that if m_unsentBrainKeys has brainKey, it equals brainParameters
        m_UnsentBrainKeys[behaviorName] = brainParameters;
    }
}
/// <summary>
/// Builds the BrainParameters expected by the continuous-control test model
/// with 2 visual observations, an 8-float vector observation and 2 actions.
/// </summary>
BrainParameters GetContinuous2vis8vec2actionBrainParameters()
{
    return new BrainParameters
    {
        VectorObservationSize = 8,
        VectorActionSize = new[] { 2 },
        NumStackedVectorObservations = 1,
        VectorActionSpaceType = SpaceType.Continuous
    };
}
/// <summary>
/// Convert a BrainParametersProto to a BrainParameters struct.
/// </summary>
/// <param name="bpp">An instance of a brain parameters protobuf object.</param>
/// <returns>A BrainParameters struct populated from the proto's deprecated
/// action descriptions and its ActionSpec.</returns>
public static BrainParameters ToBrainParameters(this BrainParametersProto bpp)
{
    var brainParameters = new BrainParameters();
    brainParameters.VectorActionDescriptions = bpp.VectorActionDescriptionsDeprecated.ToArray();
    brainParameters.ActionSpec = ToActionSpec(bpp.ActionSpec);
    return brainParameters;
}
/// <summary>
/// Checks that a TensorApplier can be constructed from default BrainParameters.
/// </summary>
public void Construction()
{
    var bp = new BrainParameters();
    // FIX: use a using statement so the allocator is disposed even if the
    // assertion throws; the original leaked it on failure.
    using (var alloc = new TensorCachingAllocator())
    {
        var mem = new Dictionary<int, List<float>>();
        var tensorGenerator = new TensorApplier(bp, 0, alloc, mem);
        Assert.IsNotNull(tensorGenerator);
    }
}
/// <summary>
/// Builds the BrainParameters expected by the recurrent discrete-control test
/// model with 1 visual observation, no vector observation and 2x3 branched actions.
/// </summary>
private BrainParameters GetDiscrete1vis0vec_2_3action_recurrModelBrainParameters()
{
    return new BrainParameters
    {
        VectorObservationSize = 0,
        VectorActionSize = new int[] { 2, 3 },
        NumStackedVectorObservations = 1,
        VectorActionSpaceType = SpaceType.Discrete
    };
}
/// <summary>
/// A masker with no masked actions must return a null mask.
/// </summary>
public void NullMask()
{
    var brainParameters = new BrainParameters
    {
        vectorActionSpaceType = SpaceType.discrete
    };
    var masker = new ActionMasker(brainParameters);
    Assert.IsNull(masker.GetMask());
}
/// <summary>
/// Factory for the ModelParamLoader : Creates a ModelParamLoader and runs the checks
/// on it.
/// </summary>
/// <param name="model">
/// The Barracuda engine model for loading static parameters
/// </param>
/// <param name="brainParameters">
/// The BrainParameters that are used verify the compatibility with the InferenceEngine
/// </param>
/// <param name="sensorComponents">Attached sensor components</param>
/// <returns>The list the error messages of the checks that failed</returns>
public static IEnumerable<string> CheckModel(Model model, BrainParameters brainParameters, SensorComponent[] sensorComponents)
{
    var failedModelChecks = new List<string>();
    if (model == null)
    {
        // No model assigned: inference cannot run, but training still can.
        failedModelChecks.Add(
            "There is no model for this Brain, cannot run inference. " +
            "(But can still train)");
        return failedModelChecks;
    }

    // Read the static metadata tensors baked into the model at export time.
    var modelApiVersion = (int)model.GetTensorByName(TensorNames.VersionNumber)[0];
    var memorySize = (int)model.GetTensorByName(TensorNames.MemorySize)[0];
    var isContinuousInt = (int)model.GetTensorByName(TensorNames.IsContinuousControl)[0];
    var isContinuous = GetActionType(isContinuousInt);
    var actionSize = (int)model.GetTensorByName(TensorNames.ActionOutputShape)[0];

    if (modelApiVersion == -1)
    {
        // FIX: the original had a raw line break inside this string literal,
        // which is invalid in a non-verbatim C# string; restored as "\n".
        failedModelChecks.Add(
            "Model was not trained using the right version of ML-Agents.\n" +
            "Cannot use this model.");
        return failedModelChecks;
    }
    if (modelApiVersion != k_ApiVersion)
    {
        failedModelChecks.Add(
            $"Version of the trainer the model was trained with ({modelApiVersion}) " +
            $"is not compatible with the Brain's version ({k_ApiVersion}).");
        return failedModelChecks;
    }

    // Metadata scalars must all be present before shape checks make sense.
    failedModelChecks.AddRange(
        CheckIntScalarPresenceHelper(new Dictionary<string, int>()
        {
            { TensorNames.MemorySize, memorySize },
            { TensorNames.IsContinuousControl, isContinuousInt },
            { TensorNames.ActionOutputShape, actionSize }
        })
    );
    failedModelChecks.AddRange(
        CheckInputTensorPresence(model, brainParameters, memorySize, isContinuous, sensorComponents)
    );
    failedModelChecks.AddRange(
        CheckOutputTensorPresence(model, memorySize)
    );
    failedModelChecks.AddRange(
        CheckInputTensorShape(model, brainParameters, sensorComponents)
    );
    failedModelChecks.AddRange(
        CheckOutputTensorShape(model, brainParameters, isContinuous, actionSize)
    );
    return failedModelChecks;
}
/// <summary>
/// Setting an action mask on a continuous-control brain must raise
/// a UnityAgentsException when the mask is requested.
/// </summary>
public void FailsWithContinuous()
{
    var brainParameters = new BrainParameters
    {
        vectorActionSpaceType = SpaceType.Continuous,
        vectorActionSize = new[] { 4 }
    };
    var masker = new ActionMasker(brainParameters);
    masker.SetActionMask(0, new[] { 0 });
    Assert.Catch<UnityAgentsException>(() => masker.GetMask());
}
/// <summary>
/// Convert a BrainParametersProto to a BrainParameters struct.
/// </summary>
/// <param name="bpp">An instance of a brain parameters protobuf object.</param>
/// <returns>A BrainParameters struct with action sizes, descriptions and
/// space type copied from the proto.</returns>
public static BrainParameters ToBrainParameters(this BrainParametersProto bpp)
{
    var brainParameters = new BrainParameters();
    brainParameters.VectorActionSize = bpp.VectorActionSize.ToArray();
    brainParameters.VectorActionDescriptions = bpp.VectorActionDescriptions.ToArray();
    brainParameters.VectorActionSpaceType = (SpaceType)bpp.VectorActionSpaceType;
    return brainParameters;
}
/// <summary>
/// Generates failed checks that correspond to output shapes incompatibilities between
/// the model and the BrainParameters.
/// </summary>
/// <param name="model">
/// The Barracuda engine model for loading static parameters
/// </param>
/// <param name="brainParameters">
/// The BrainParameters that are used verify the compatibility with the InferenceEngine
/// </param>
/// <param name="isContinuous">
/// Whether the model is expecting continuous or discrete control.
/// </param>
/// <param name="modelActionSize">
/// The size of the action output that is expected by the model.
/// </param>
/// <returns>
/// A IEnumerable of string corresponding to the incompatible shapes between model
/// and BrainParameters.
/// </returns>
static IEnumerable<string> CheckOutputTensorShape(
    Model model,
    BrainParameters brainParameters,
    ModelActionType isContinuous,
    int modelActionSize)
{
    var failedModelChecks = new List<string>();

    // Guard clauses: the control type baked into the model must be known and
    // must agree with the Brain's configured action space.
    if (isContinuous == ModelActionType.Unknown)
    {
        failedModelChecks.Add("Cannot infer type of Control from the provided model.");
        return failedModelChecks;
    }
    if (isContinuous == ModelActionType.Continuous &&
        brainParameters.VectorActionSpaceType != SpaceType.Continuous)
    {
        failedModelChecks.Add(
            "Model has been trained using Continuous Control but the Brain Parameters " +
            "suggest Discrete Control.");
        return failedModelChecks;
    }
    if (isContinuous == ModelActionType.Discrete &&
        brainParameters.VectorActionSpaceType != SpaceType.Discrete)
    {
        failedModelChecks.Add(
            "Model has been trained using Discrete Control but the Brain Parameters " +
            "suggest Continuous Control.");
        return failedModelChecks;
    }

    // Select the shape checker matching the configured action space.
    var tensorTester = new Dictionary<string, Func<BrainParameters, TensorShape, int, string>>();
    tensorTester[TensorNames.ActionOutput] =
        brainParameters.VectorActionSpaceType == SpaceType.Continuous
            ? (Func<BrainParameters, TensorShape, int, string>)CheckContinuousActionOutputShape
            : CheckDiscreteActionOutputShape;

    // Validate each model output we know how to check; unknown outputs are skipped.
    foreach (var name in model.outputs)
    {
        Func<BrainParameters, TensorShape, int, string> tester;
        if (tensorTester.TryGetValue(name, out tester))
        {
            var error = tester.Invoke(brainParameters, model.GetShapeByName(name), modelActionSize);
            if (error != null)
            {
                failedModelChecks.Add(error);
            }
        }
    }
    return failedModelChecks;
}
/// <summary>
/// Checks that the shape of the discrete action output is the same in the
/// model and in the Brain Parameters (sum of all branch sizes).
/// </summary>
/// <param name="brainParameters">
/// The BrainParameters that are used verify the compatibility with the InferenceEngine
/// </param>
/// <param name="shape"> The tensor shape that is expected by the model</param>
/// <param name="modelActionSize">
/// The size of the action output that is expected by the model.
/// </param>
/// <returns>
/// If the Check failed, returns a string containing information about why the
/// check failed. If the check passed, returns null.
/// </returns>
private static string CheckDiscreteActionOutputShape(
    BrainParameters brainParameters, TensorShape shape, int modelActionSize)
{
    var bpActionSize = brainParameters.vectorActionSize.Sum();
    return modelActionSize == bpActionSize
        ? null
        : "Action Size of the model does not match. The BrainParameters expect " +
          $"{bpActionSize} but the model contains {modelActionSize}.";
}
/// <summary>
/// Checks that the shape of the continuous action output is the same in the
/// model and in the Brain Parameters (first element of VectorActionSize).
/// </summary>
/// <param name="brainParameters">
/// The BrainParameters that are used verify the compatibility with the InferenceEngine
/// </param>
/// <param name="shape"> The tensor shape that is expected by the model</param>
/// <param name="modelActionSize">
/// The size of the action output that is expected by the model.
/// </param>
/// <returns>If the Check failed, returns a string containing information about why the
/// check failed. If the check passed, returns null.</returns>
static string CheckContinuousActionOutputShape(
    BrainParameters brainParameters, TensorShape shape, int modelActionSize)
{
    var bpActionSize = brainParameters.VectorActionSize[0];
    return modelActionSize == bpActionSize
        ? null
        : "Action Size of the model does not match. The BrainParameters expect " +
          $"{bpActionSize} but the model contains {modelActionSize}.";
}
/// <summary>
/// Builds the supervised-learning network structure matching the action space type.
/// </summary>
public override void InitializeInner(BrainParameters brainParameters, Tensor inputStateTensor,
    List<Tensor> inputVisualTensors, TrainerParams trainerParams)
{
    switch (ActionSpace)
    {
        case SpaceType.continuous:
            InitializeSLStructureContinuousAction(inputStateTensor, inputVisualTensors, trainerParams);
            break;
        case SpaceType.discrete:
            InitializeSLStructureDiscreteAction(inputStateTensor, inputVisualTensors, trainerParams);
            break;
    }
}
/// <summary>
/// Checks that the shape of the Previous Vector Action input placeholder is the same in the
/// model and in the Brain Parameters (number of action branches).
/// </summary>
/// <param name="brainParameters">
/// The BrainParameters that are used verify the compatibility with the InferenceEngine
/// </param>
/// <param name="tensorProxy"> The tensor that is expected by the model</param>
/// <param name="sensorComponents">Array of attached sensor components</param>
/// <returns>If the Check failed, returns a string containing information about why the
/// check failed. If the check passed, returns null.</returns>
static string CheckPreviousActionShape(
    BrainParameters brainParameters, TensorProxy tensorProxy,
    SensorComponent[] sensorComponents)
{
    var numberActionsBp = brainParameters.VectorActionSize.Length;
    var numberActionsT = tensorProxy.shape[tensorProxy.shape.Length - 1];
    return numberActionsBp == numberActionsT
        ? null
        : "Previous Action Size of the model does not match. " +
          $"Received {numberActionsBp} but was expecting {numberActionsT}.";
}
/// <summary>
/// Creates or retrieves an existing ModelRunner that uses the same
/// NNModel and the InferenceDevice as provided. New runners consume one
/// inference seed each so they are deterministically but distinctly seeded.
/// </summary>
/// <param name="model">The NNModel the ModelRunner must use.</param>
/// <param name="brainParameters">The BrainParameters used to create the ModelRunner.</param>
/// <param name="inferenceDevice">
/// The inference device (CPU or GPU) the ModelRunner will use.
/// </param>
/// <returns> The ModelRunner compatible with the input settings.</returns>
internal ModelRunner GetOrCreateModelRunner(
    NNModel model, BrainParameters brainParameters, InferenceDevice inferenceDevice)
{
    foreach (var existing in m_ModelRunners)
    {
        if (existing.HasModel(model, inferenceDevice))
        {
            return existing;
        }
    }
    var created = new ModelRunner(model, brainParameters, inferenceDevice, m_InferenceSeed);
    m_ModelRunners.Add(created);
    m_InferenceSeed++;
    return created;
}
/// <summary>
/// A default-constructed BrainParameters must convert to proto without
/// throwing, both before and after trainer capabilities are configured.
/// </summary>
public void TestDefaultBrainParametersToProto()
{
    var defaultBrain = new BrainParameters();
    defaultBrain.ToProto("foo", false);

    var capabilities = new UnityRLCapabilities
    {
        BaseRLCapabilities = true,
        HybridActions = false
    };
    Academy.Instance.TrainerCapabilities = capabilities;
    defaultBrain.ToProto("foo", false);
}
/// <summary>
/// Writes brain parameters to file, serialized as a delimited proto at a
/// fixed offset just past the metadata header.
/// </summary>
/// <param name="brainName">The name of the Brain the agent is attached to.</param>
/// <param name="brainParameters">The parameters of the Brain the agent is attached to.</param>
void WriteBrainParameters(string brainName, BrainParameters brainParameters)
{
    if (m_Writer == null)
    {
        return; // Writer was already closed; nothing to record.
    }
    m_Writer.Seek(MetaDataBytes + 1, 0);
    brainParameters.ToProto(brainName, false).WriteDelimitedTo(m_Writer);
}
/// <summary>
/// Checks that the shape of the Previous Vector Action input placeholder is the same in the
/// model and in the Brain Parameters (number of discrete actions in the ActionSpec).
/// </summary>
/// <param name="brainParameters">
/// The BrainParameters that are used verify the compatibility with the InferenceEngine
/// </param>
/// <param name="tensorProxy"> The tensor that is expected by the model</param>
/// <param name="sensorComponents">Array of attached sensor components (unused).</param>
/// <param name="observableAttributeTotalSize">Sum of the sizes of all ObservableAttributes (unused).</param>
/// <returns>If the Check failed, returns a string containing information about why the
/// check failed. If the check passed, returns null.</returns>
static string CheckPreviousActionShape(
    BrainParameters brainParameters, TensorProxy tensorProxy,
    SensorComponent[] sensorComponents, int observableAttributeTotalSize)
{
    var numberActionsBp = brainParameters.ActionSpec.NumDiscreteActions;
    var numberActionsT = tensorProxy.shape[tensorProxy.shape.Length - 1];
    return numberActionsBp == numberActionsT
        ? null
        : "Previous Action Size of the model does not match. " +
          $"Received {numberActionsBp} but was expecting {numberActionsT}.";
}
/// <summary>
/// Returns a new TensorAppliers object wiring each output tensor name to the
/// applier that copies it back into agent actions/memories.
/// </summary>
/// <param name="bp"> The BrainParameters used to determine what Appliers will be
/// used</param>
/// <param name="seed"> The seed the Appliers will be initialized with.</param>
public TensorApplier(BrainParameters bp, int seed)
{
    _dict[TensorNames.ValueEstimateOutput] = new ValueEstimateApplier();
    // Discrete control needs the branch sizes and a seed for sampling;
    // continuous control copies the output directly.
    if (bp.vectorActionSpaceType != SpaceType.continuous)
    {
        _dict[TensorNames.ActionOutput] = new DiscreteActionOutputApplier(bp.vectorActionSize, seed);
    }
    else
    {
        _dict[TensorNames.ActionOutput] = new ContinuousActionOutputApplier();
    }
    _dict[TensorNames.RecurrentOutput] = new MemoryOutputApplier();
}
/// <summary>
/// Checks that the shape of the Vector Observation input placeholder is the same in the
/// model and in the Brain Parameters (observation size times stack count).
/// </summary>
/// <param name="brainParameters">
/// The BrainParameters that are used verify the compatibility with the InferenceEngine
/// </param>
/// <param name="tensorProxy">The tensor that is expected by the model</param>
/// <returns>
/// If the Check failed, returns a string containing information about why the
/// check failed. If the check passed, returns null.
/// </returns>
static string CheckVectorObsShape(
    BrainParameters brainParameters, TensorProxy tensorProxy)
{
    var vecObsSizeBp = brainParameters.vectorObservationSize;
    var numStackedVector = brainParameters.numStackedVectorObservations;
    var totalVecObsSizeT = tensorProxy.shape[tensorProxy.shape.Length - 1];
    if (vecObsSizeBp * numStackedVector == totalVecObsSizeT)
    {
        return null;
    }
    return "Vector Observation Size of the model does not match. Received " +
        $"{vecObsSizeBp} x {numStackedVector} but was expecting {totalVecObsSizeT}.";
}
/// <summary>
/// Converts a Brain into to a Protobuf BrainInfoProto so it can be sent
/// </summary>
/// <returns>The BrainInfoProto generated.</returns>
/// <param name="bp">The instance of BrainParameter to extend.</param>
/// <param name="name">The name of the brain.</param>
/// <param name="isTraining">Whether or not the Brain is training.</param>
public static BrainParametersProto ToProto(this BrainParameters bp, string name, bool isTraining)
{
    var brainParametersProto = new BrainParametersProto
    {
        VectorActionSize = { bp.VectorActionSize },
        VectorActionSpaceType = (SpaceTypeProto)bp.VectorActionSpaceType,
        BrainName = name,
        IsTraining = isTraining
    };
    // FIX: guard against a null descriptions array (e.g. a default-constructed
    // BrainParameters) — AddRange throws on null. Mirrors the guard used by
    // the deprecated-fields overload of ToProto.
    if (bp.VectorActionDescriptions != null)
    {
        brainParametersProto.VectorActionDescriptions.AddRange(bp.VectorActionDescriptions);
    }
    return brainParametersProto;
}
/// <summary>
/// Registers a behavior with the communicator: allocates its per-step agent
/// info list and caches its BrainParameters for later sending. Subscribing
/// the same key twice is a no-op.
/// </summary>
public void SubscribeBrain(string brainKey, BrainParameters brainParameters)
{
    if (m_BehaviorNames.Contains(brainKey))
    {
        return;
    }
    m_BehaviorNames.Add(brainKey);
    var agentInfos = new UnityRLOutputProto.Types.ListAgentInfoProto();
    m_CurrentUnityRlOutput.AgentInfos.Add(brainKey, agentInfos);
    CacheBrainParameters(brainKey, brainParameters);
}
/// <summary>
/// Checks that the shape of the Previous Vector Action input placeholder is the same in the
/// model and in the Brain Parameters (number of action branches).
/// </summary>
/// <param name="brainParameters">
/// The BrainParameters that are used verify the compatibility with the InferenceEngine
/// </param>
/// <param name="tensorProxy"> The tensor that is expected by the model</param>
/// <param name="sensorComponents">Array of attached sensor components (unused).</param>
/// <param name="observableAttributeTotalSize">Sum of the sizes of all ObservableAttributes (unused).</param>
/// <returns>If the Check failed, returns a string containing information about why the
/// check failed. If the check passed, returns null.</returns>
static string CheckPreviousActionShape(
    BrainParameters brainParameters, TensorProxy tensorProxy,
    SensorComponent[] sensorComponents, int observableAttributeTotalSize)
{
    // TODO: Update this check after intergrating ActionSpec into BrainParameters
    var numberActionsBp = brainParameters.VectorActionSize.Length;
    var numberActionsT = tensorProxy.shape[tensorProxy.shape.Length - 1];
    return numberActionsBp == numberActionsT
        ? null
        : "Previous Action Size of the model does not match. " +
          $"Received {numberActionsBp} but was expecting {numberActionsT}.";
}
/// <summary>
/// Generates failed checks that correspond to inputs expected by the model that are not
/// present in the BrainParameters.
/// </summary>
/// <param name="model">
/// The Barracuda engine model for loading static parameters
/// </param>
/// <param name="brainParameters">
/// The BrainParameters that are used verify the compatibility with the InferenceEngine
/// </param>
/// <param name="memory">
/// The memory size that the model is expecting.
/// </param>
/// <param name="sensors">Array of attached sensor components</param>
/// <param name="deterministicInference"> Inference only: set to true if the action selection from model should be
/// Deterministic. </param>
/// <returns>
/// A IEnumerable of the checks that failed
/// </returns>
static IEnumerable<FailedCheck> CheckInputTensorPresence(
    Model model,
    BrainParameters brainParameters,
    int memory,
    ISensor[] sensors,
    bool deterministicInference = false
)
{
    var failedModelChecks = new List<FailedCheck>();
    var tensorsNames = model.GetInputNames();

    // Every attached sensor must have a matching observation placeholder input.
    for (var sensorIndex = 0; sensorIndex < sensors.Length; sensorIndex++)
    {
        if (!tensorsNames.Contains(
            TensorNames.GetObservationName(sensorIndex)))
        {
            var sensor = sensors[sensorIndex];
            failedModelChecks.Add(
                FailedCheck.Warning("The model does not contain an Observation Placeholder Input " +
                    $"for sensor component {sensorIndex} ({sensor.GetType().Name}).")
            );
        }
    }

    // If the model has a non-negative memory size but requires a recurrent input
    // FIX: removed the unused local `modelVersion` (dead GetVersion() call).
    if (memory > 0)
    {
        if (!tensorsNames.Any(x => x == TensorNames.RecurrentInPlaceholder))
        {
            failedModelChecks.Add(
                FailedCheck.Warning("The model does not contain a Recurrent Input Node but has memory_size.")
            );
        }
    }

    // If the model uses discrete control but does not have an input for action masks
    if (model.HasDiscreteOutputs(deterministicInference))
    {
        if (!tensorsNames.Contains(TensorNames.ActionMaskPlaceholder))
        {
            // FIX: the original had a raw line break inside this string literal
            // (invalid in a non-verbatim C# string); restored as a single space.
            failedModelChecks.Add(
                FailedCheck.Warning("The model does not contain an Action Mask but is using Discrete Control.")
            );
        }
    }
    return failedModelChecks;
}
/// <summary>
/// Checks that the shape of the Vector Observation input placeholder is the same in the
/// model and in the Brain Parameters. Tests the models created with the API of version 1.X
/// </summary>
/// <param name="brainParameters">
/// The BrainParameters that are used verify the compatibility with the InferenceEngine
/// </param>
/// <param name="tensorProxy">The tensor that is expected by the model</param>
/// <param name="sensors">Array of attached sensor components</param>
/// <param name="observableAttributeTotalSize">Sum of the sizes of all ObservableAttributes.</param>
/// <returns>
/// If the Check failed, returns a string containing information about why the
/// check failed. If the check passed, returns null.
/// </returns>
static FailedCheck CheckVectorObsShapeLegacy(
    BrainParameters brainParameters, TensorProxy tensorProxy, ISensor[] sensors,
    int observableAttributeTotalSize)
{
    var vecObsSizeBp = brainParameters.VectorObservationSize;
    var numStackedVector = brainParameters.NumStackedVectorObservations;
    var totalVecObsSizeT = tensorProxy.shape[tensorProxy.shape.Length - 1];

    // Sum up the sizes of all rank-1 ("vector") sensors, collecting the
    // individual sizes as we go for the diagnostic message.
    var totalVectorSensorSize = 0;
    var vectorSizes = new List<int>();
    foreach (var sens in sensors)
    {
        var shape = sens.GetObservationSpec().Shape;
        if (shape.Length == 1)
        {
            totalVectorSensorSize += shape[0];
            vectorSizes.Add(shape[0]);
        }
    }

    if (totalVectorSensorSize == totalVecObsSizeT)
    {
        return null;
    }

    // NOTE: with no vector sensors this intentionally renders as "]" to match
    // the original accumulation behavior.
    var sensorSizes = vectorSizes.Count == 0
        ? "]"
        : "[" + string.Join(", ", vectorSizes) + "]";
    return FailedCheck.Warning(
        $"Vector Observation Size of the model does not match. Was expecting {totalVecObsSizeT} " +
        $"but received: \n" +
        $"Vector observations: {vecObsSizeBp} x {numStackedVector}\n" +
        $"Total [Observable] attributes: {observableAttributeTotalSize}\n" +
        $"Sensor sizes: {sensorSizes}."
    );
}
/// <summary>
/// Checks that the shape of the Vector Observation input placeholder is the same in the
/// model and in the Brain Parameters.
/// </summary>
/// <param name="brainParameters">
/// The BrainParameters that are used verify the compatibility with the InferenceEngine
/// </param>
/// <param name="tensorProxy">The tensor that is expected by the model</param>
/// <param name="sensorComponents">Array of attached sensor components</param>
/// <param name="observableAttributeTotalSize">Sum of the sizes of all ObservableAttributes.</param>
/// <returns>
/// If the Check failed, returns a string containing information about why the
/// check failed. If the check passed, returns null.
/// </returns>
static string CheckVectorObsShape(
    BrainParameters brainParameters, TensorProxy tensorProxy, SensorComponent[] sensorComponents,
    int observableAttributeTotalSize)
{
    var vecObsSizeBp = brainParameters.VectorObservationSize;
    var numStackedVector = brainParameters.NumStackedVectorObservations;
    var totalVecObsSizeT = tensorProxy.shape[tensorProxy.shape.Length - 1];

    // Sum up the sizes of all vector sensors plus any [Observable] attributes,
    // keeping the individual sizes for the diagnostic message.
    var totalVectorSensorSize = 0;
    var vectorSizes = new List<int>();
    foreach (var sensorComp in sensorComponents)
    {
        if (sensorComp.IsVector())
        {
            var vecSize = sensorComp.GetObservationShape()[0];
            totalVectorSensorSize += vecSize;
            vectorSizes.Add(vecSize);
        }
    }
    totalVectorSensorSize += observableAttributeTotalSize;

    if (vecObsSizeBp * numStackedVector + totalVectorSensorSize == totalVecObsSizeT)
    {
        return null;
    }

    // NOTE: with no vector sensors this intentionally renders as "]" to match
    // the original accumulation behavior.
    var sensorSizes = vectorSizes.Count == 0
        ? "]"
        : "[" + string.Join(", ", vectorSizes) + "]";
    return $"Vector Observation Size of the model does not match. Was expecting {totalVecObsSizeT} " +
        $"but received: \n" +
        $"Vector observations: {vecObsSizeBp} x {numStackedVector}\n" +
        $"Total [Observable] attributes: {observableAttributeTotalSize}\n" +
        $"SensorComponent sizes: {sensorSizes}.";
}
/// <summary>
/// Converts a BrainParameters into to a BrainParametersProto so it can be sent.
/// </summary>
/// <returns>The BrainInfoProto generated.</returns>
/// <param name="bp">The instance of BrainParameter to extend.</param>
/// <param name="name">The name of the brain.</param>
/// <param name="isTraining">Whether or not the Brain is training.</param>
public static BrainParametersProto ToProto(this BrainParameters bp, string name, bool isTraining)
{
    var proto = new BrainParametersProto
    {
        BrainName = name,
        IsTraining = isTraining,
        VectorActionSizeDeprecated = { bp.VectorActionSize },
        VectorActionSpaceTypeDeprecated = (SpaceTypeProto)bp.VectorActionSpaceType
    };
    // Descriptions may legitimately be null (default-constructed parameters).
    var descriptions = bp.VectorActionDescriptions;
    if (descriptions != null)
    {
        proto.VectorActionDescriptionsDeprecated.AddRange(descriptions);
    }
    return proto;
}
/// <summary>
/// Returns a new TensorGenerators object wiring each tensor name to the
/// generator that fills it before inference.
/// </summary>
/// <param name="bp"> The BrainParameters used to determine what Generators will be
/// used</param>
/// <param name="seed"> The seed the Generators will be initialized with.</param>
/// <param name="allocator"> Tensor allocator</param>
/// <param name="barracudaModel">Optional Barracuda model supplying recurrent memory input names.</param>
public TensorGenerator(
    BrainParameters bp, int seed, ITensorAllocator allocator, object barracudaModel = null)
{
    // Generators for inputs.
    m_Dict[TensorNames.BatchSizePlaceholder] = new BatchSizeGenerator(allocator);
    m_Dict[TensorNames.SequenceLengthPlaceholder] = new SequenceLengthGenerator(allocator);
    m_Dict[TensorNames.VectorObservationPlacholder] = new VectorObservationGenerator(allocator);
    m_Dict[TensorNames.RecurrentInPlaceholder] = new RecurrentInputGenerator(allocator);

    if (barracudaModel != null)
    {
        // One recurrent input generator per memory declared by the model.
        var model = (Model)barracudaModel;
        for (var i = 0; i < model?.memories.Length; i++)
        {
            m_Dict[model.memories[i].input] = new BarracudaRecurrentInputGenerator(i, allocator);
        }
    }

    m_Dict[TensorNames.PreviousActionPlaceholder] = new PreviousActionInputGenerator(allocator);
    m_Dict[TensorNames.ActionMaskPlaceholder] = new ActionMaskInputGenerator(allocator);
    m_Dict[TensorNames.RandomNormalEpsilonPlaceholder] = new RandomNormalInputGenerator(seed, allocator);

    var resolutions = bp.cameraResolutions;
    if (resolutions != null)
    {
        for (var visIndex = 0; visIndex < resolutions.Length; visIndex++)
        {
            m_Dict[TensorNames.VisualObservationPlaceholderPrefix + visIndex] =
                new VisualObservationInputGenerator(visIndex, resolutions[visIndex].blackAndWhite, allocator);
        }
    }

    // Generators for Outputs
    m_Dict[TensorNames.ActionOutput] = new BiDimensionalOutputGenerator(allocator);
    m_Dict[TensorNames.RecurrentOutput] = new BiDimensionalOutputGenerator(allocator);
    m_Dict[TensorNames.ValueEstimateOutput] = new BiDimensionalOutputGenerator(allocator);
}
/// <summary>
/// Trainers will call this method to initialize the model. This method will call the InitializeInner()
/// after reading sizes from the brain parameters and creating the basic input tensors.
/// </summary>
/// <param name="brainParameters">brain parameter of the MLagent brain</param>
/// <param name="enableTraining">whether enable training</param>
/// <param name="trainerParams">trainer parameters passed by the trainer. Training will not be enabled
/// if this is null (it is only forwarded to InitializeInner when enableTraining is true).</param>
public virtual void Initialize(BrainParameters brainParameters, bool enableTraining, TrainerParams trainerParams = null)
{
    Debug.Assert(Initialized == false, "Model already Initalized");

    // Optionally wrap all graph construction in a name scope so tensors are
    // grouped under this model's name.
    NameScope ns = null;
    if (!string.IsNullOrEmpty(modelName))
    {
        ns = Current.K.name_scope(modelName);
    }

    // Cache the sizes read from the brain parameters; StateSize accounts for
    // observation stacking.
    ActionSizes = brainParameters.vectorActionSize;
    StateSize = brainParameters.vectorObservationSize * brainParameters.numStackedVectorObservations;
    ActionSpace = brainParameters.vectorActionSpaceType;
    Debug.Assert(ActionSizes[0] > 0, "Action size can not be zero");

    //create basic inputs
    // The vector-observation input is only created when there is a vector observation.
    var inputStateTensor = StateSize > 0 ? UnityTFUtils.Input(new int?[] { StateSize }, name: "InputStates")[0] : null;
    HasVectorObservation = inputStateTensor != null;
    var inputVisualTensors = CreateVisualInputs(brainParameters);
    HasVisualObservation = inputVisualTensors != null;

    //create inner intialization
    // Trainer params are only forwarded when training is enabled.
    InitializeInner(brainParameters, inputStateTensor, inputVisualTensors, enableTraining ? trainerParams : null);

    //test
    //Debug.LogWarning("Tensorflow Graph is saved for test purpose at: SavedGraph/" + name + ".pb");
    //((UnityTFBackend)Current.K).ExportGraphDef("SavedGraph/" + name + ".pb");

    Current.K.try_initialize_variables(true);

    // Close the name scope (if opened) before restoring any checkpoint.
    if (ns != null)
    {
        ns.Dispose();
    }
    if (checkpointToLoad != null)
    {
        RestoreCheckpoint(checkpointToLoad.bytes, true);
    }
    Initialized = true;
    TrainingEnabled = enableTraining;
}
/// <summary>
/// Generate an InferenceEvent for the model, collecting Barracuda model metadata,
/// action/observation specs and model size/hash for analytics.
/// </summary>
/// <param name="nnModel">The serialized model asset to describe.</param>
/// <param name="behaviorName">Behavior name; hashed before being recorded.</param>
/// <param name="inferenceDevice">Device the model will run inference on.</param>
/// <param name="sensors">Sensors attached to the agent; one observation spec is recorded per sensor.</param>
/// <param name="brainParameters">Brain parameters used to derive the recorded action spec.</param>
/// <returns>The populated InferenceEvent.</returns>
internal static InferenceEvent GetEventForModel(
    NNModel nnModel,
    string behaviorName,
    InferenceDevice inferenceDevice,
    IList<ISensor> sensors,
    BrainParameters brainParameters
)
{
    var barracudaModel = ModelLoader.Load(nnModel);
    var inferenceEvent = new InferenceEvent();

    // Hash the behavior name so that there's no concern about PII or "secret" data being leaked.
    inferenceEvent.BehaviorName = AnalyticsUtils.Hash(behaviorName);

    inferenceEvent.BarracudaModelSource = barracudaModel.IrSource;
    inferenceEvent.BarracudaModelVersion = barracudaModel.IrVersion;
    inferenceEvent.BarracudaModelProducer = barracudaModel.ProducerName;
    inferenceEvent.MemorySize = (int)barracudaModel.GetTensorByName(TensorNames.MemorySize)[0];
    inferenceEvent.InferenceDevice = (int)inferenceDevice;

    if (barracudaModel.ProducerName == "Script")
    {
        // .nn files don't have these fields set correctly. Assign some placeholder values.
        inferenceEvent.BarracudaModelSource = "NN";
        inferenceEvent.BarracudaModelProducer = "tensorflow_to_barracuda.py";
    }

#if UNITY_2019_3_OR_NEWER && UNITY_EDITOR
    // Record the installed Barracuda package version when running in a
    // sufficiently recent editor; otherwise the field is left unset.
    var barracudaPackageInfo = UnityEditor.PackageManager.PackageInfo.FindForAssembly(typeof(Tensor).Assembly);
    inferenceEvent.BarracudaPackageVersion = barracudaPackageInfo.version;
#else
    inferenceEvent.BarracudaPackageVersion = null;
#endif

    inferenceEvent.ActionSpec = EventActionSpec.FromBrainParameters(brainParameters);
    inferenceEvent.ObservationSpecs = new List<EventObservationSpec>(sensors.Count);
    foreach (var sensor in sensors)
    {
        inferenceEvent.ObservationSpecs.Add(EventObservationSpec.FromSensor(sensor));
    }

    inferenceEvent.TotalWeightSizeBytes = GetModelWeightSize(barracudaModel);
    inferenceEvent.ModelHash = GetModelHash(barracudaModel);
    return inferenceEvent;
}