/// <summary>
/// Compiles the model into an executable graph
/// </summary>
/// <param name="optimizer">The optimization algorithm to use for training the model</param>
/// <param name="losses">The losses for each of the outputs of the model</param>
/// <remarks>The list of loss functions should be in order of the outputs of the model</remarks>
/// <exception cref="ArgumentNullException">
/// Thrown when <paramref name="optimizer"/> or <paramref name="losses"/> is <c>null</c>.
/// </exception>
/// <exception cref="ArgumentException">
/// Thrown when the number of loss functions does not match the number of outputs of the model.
/// </exception>
public void Compile(Optimizer optimizer, IEnumerable<LossFunction> losses)
{
    if (optimizer == null)
    {
        // FIX: the original call had paramName and message swapped
        // (ArgumentNullException(string paramName, string message)), so the
        // exception reported the message text as the parameter name.
        throw new ArgumentNullException(nameof(optimizer), "The optimizer must be specified");
    }

    if (losses == null)
    {
        // FIX: previously a null sequence surfaced as a NullReferenceException
        // at losses.Count() instead of a proper argument validation error.
        throw new ArgumentNullException(nameof(losses), "The loss functions must be specified");
    }

    // Materialize once so the caller's sequence is not enumerated twice
    // (once for Count(), once for Zip below).
    var lossList = losses.ToList();

    if (lossList.Count != _outputs.Count())
    {
        throw new ArgumentException(
            "The number of loss functions does not match the number of outputs of the model",
            nameof(losses));
    }

    _graph = new TFGraph();

    var compilationContext = new ModelCompilationContext(_graph);

    _optimizer = optimizer;
    _inputMapping = new Dictionary<Input, TFOutput>();
    _outputMapping = new Dictionary<Layer, TFOutput>();
    _placeholderMapping = new Dictionary<Layer, TFOutput>();

    var compiledLosses = new List<TFOutput>();
    var layersWithLosses = _outputs.Zip(lossList, (layer, loss) => (layer, loss));

    // By compiling the outputs, the layers that are connected
    // to the outputs are also compiled. This goes all the way back to the inputs.
    foreach (var (layer, loss) in layersWithLosses)
    {
        // The placeholder receives the expected (ground-truth) values for this
        // output when the loss is evaluated during training.
        var placeholder = _graph.Placeholder(TFDataType.Double, new TFShape(layer.OutputShape));
        var output = layer.Compile(compilationContext);

        _outputMapping.Add(layer, output);
        _placeholderMapping.Add(layer, placeholder);

        var compiledLoss = loss.Compile(compilationContext, output, placeholder);
        compiledLosses.Add(compiledLoss);
    }

    foreach (var input in _inputs)
    {
        _inputMapping.Add(input, input.Configuration.Output);
    }

    // The total model loss is the sum of the individual per-output losses.
    _modelLoss = compiledLosses.Aggregate((left, right) => _graph.Add(left, right));

    _optimizer.Compile(compilationContext, _modelLoss, compilationContext.Parameters);

    _initializers = compilationContext.Initializers;
    _parameters = compilationContext.Parameters;
}