/// <summary>
/// Add a dense layer to a neural network.
/// </summary>
/// <param name="input">The neural network to expand.</param>
/// <param name="outputDim">The number of dimensions in the dense layer.</param>
/// <param name="outputName">The name of the layer.</param>
/// <returns>The neural network with the dense layer added.</returns>
public static CNTK.Variable Dense(
    this CNTK.Variable input,
    int outputDim,
    string outputName = "")
{
    var shape = CNTK.NDShape.CreateNDShape(new int[] { outputDim, CNTK.NDShape.InferredDimension });
    var timesParam = new CNTK.Parameter(
        shape,
        CNTK.DataType.Float,
        CNTK.CNTKLib.GlorotUniformInitializer(
            CNTK.CNTKLib.DefaultParamInitScale,
            CNTK.CNTKLib.SentinelValueForInferParamInitRank,
            CNTK.CNTKLib.SentinelValueForInferParamInitRank,
            1),
        CurrentDevice,
        "timesParam_" + outputName);
    var timesFunction = CNTK.CNTKLib.Times(
        timesParam,
        input,
        1 /* output dimension */,
        0 /* CNTK should infer the input dimensions */);
    var plusParam = new CNTK.Parameter(
        CNTK.NDShape.CreateNDShape(new int[] { CNTK.NDShape.InferredDimension }),
        0.0f,
        CurrentDevice,
        "plusParam_" + outputName);
    var result = CNTK.CNTKLib.Plus(plusParam, timesFunction, outputName);
    return result;
}
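Here is a minimal usage sketch (the input size and layer names are made-up values, and it assumes these extension methods and CurrentDevice are in scope): two dense layers stacked with a ReLU in between. Note that a CNTK.Function exposes .Output to get back a CNTK.Variable for further chaining.

    // hypothetical sizes: 784 inputs, 128 hidden nodes, 10 output classes
    var features = CNTK.Variable.InputVariable(new int[] { 784 }, CNTK.DataType.Float, "features");
    var hidden = CNTK.CNTKLib.ReLU(features.Dense(128, "hidden1"));
    var logits = hidden.Output.Dense(10, "logits");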
void create_model(ref CNTK.Function model, ref float[][] labels)
{
    // load the content and style images
    var target_image = preprocess_image(target_image_path, img_height, img_width);
    var style_reference_image = preprocess_image(style_reference_image_path, img_height, img_width);

    // compute the target labels from the frozen base model
    var base_model = create_base_content_and_styles_model(img_height, img_width);
    labels = compute_labels(base_model, target_image, style_reference_image);

    // set up the trainable dream layer, initialized from the content image
    var dream_weights_init = new CNTK.NDArrayView(new int[] { img_width, img_height, 3 }, target_image, computeDevice);
    var dream_weights = new CNTK.Parameter(dream_weights_init, "the_dream");
    var dummy_features = CNTK.Variable.InputVariable(new int[] { 1 }, CNTK.DataType.Float, "dummy_features");
    var dream_layer = CNTK.CNTKLib.ElementTimes(dream_weights, dummy_features, "the_dream_layer");

    // wire the dream layer into the frozen base model
    var replacements = new Dictionary<CNTK.Variable, CNTK.Variable>() { { base_model.Arguments[0], dream_layer.Output } };
    model = base_model.Clone(CNTK.ParameterCloningMethod.Freeze, replacements);

    // combine the dream layer output with the content and style outputs
    var all_outputs = new List<CNTK.Variable>() { dream_layer };
    all_outputs.AddRange(model.Outputs);
    model = CNTK.Function.Combine(all_outputs, name: "overall_model");
}
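A quick sketch of the calling side, assuming this method lives in a class that also declares the image paths and computeDevice; the finished model and the precomputed labels come back through the ref parameters:

    CNTK.Function model = null;
    float[][] labels = null;
    create_model(ref model, ref labels);
    Console.WriteLine($"overall_model has {model.Outputs.Count} outputs");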
void print_debugging_info()
{
    if (computeDevice == null)
    {
        computeDevice = Util.get_compute_device();
    }

    // normalize the input image: shift the pixel values, then scale to [0, 1]
    var features = CNTK.Variable.InputVariable(new int[] { 150, 150, 3 }, CNTK.DataType.Float, "features");
    var adjusted_features = CNTK.CNTKLib.Plus(CNTK.Constant.Scalar<float>((float)(-110), computeDevice), features);
    var scalar_factor = CNTK.Constant.Scalar<float>((float)(1.0 / 255.0), computeDevice);
    var scaled_features = CNTK.CNTKLib.ElementTimes(scalar_factor, adjusted_features);

    // a 1x1 convolution that maps the inferred input channels to 3 output channels
    var convolution_map_size = new int[] { 1, 1, CNTK.NDShape.InferredDimension, 3 };
    var W = new CNTK.Parameter(
        CNTK.NDShape.CreateNDShape(convolution_map_size),
        CNTK.DataType.Float,
        CNTK.CNTKLib.GlorotUniformInitializer(
            CNTK.CNTKLib.DefaultParamInitScale,
            CNTK.CNTKLib.SentinelValueForInferParamInitRank,
            CNTK.CNTKLib.SentinelValueForInferParamInitRank,
            1),
        computeDevice);
    var result = CNTK.CNTKLib.Convolution(
        W,
        scaled_features,
        strides: CNTK.NDShape.CreateNDShape(new int[] { 1 }),
        sharing: new CNTK.BoolVector(new bool[] { true }) /* CNTK convolutions require kernel sharing */,
        autoPadding: new CNTK.BoolVector(new bool[] { true }));

    // build the VGG16 model on top and print its structure and output shape
    var model = VGG16.get_model(result, computeDevice);
    Util.PredorderTraverse(model);
    var shape = model.Output.Shape;
    Console.WriteLine(shape.AsString());
}
/// <summary>
/// Adds a dream layer to a neural network.
/// </summary>
/// <param name="input">The neural network to extend.</param>
/// <param name="image">The content image.</param>
/// <param name="width">The width of the content image.</param>
/// <param name="height">The height of the content image.</param>
/// <returns>The neural network extended with a dream layer.</returns>
public static CNTK.Function DreamLayer(
    this CNTK.Function input,
    float[] image,
    int width,
    int height)
{
    // set up the dream layer
    var dream_weights_init = new CNTK.NDArrayView(new int[] { width, height, 3 }, image, NetUtil.CurrentDevice);
    var dream_weights = new CNTK.Parameter(dream_weights_init, "the_dream");
    var dummy_features = CNTK.Variable.InputVariable(new int[] { 1 }, CNTK.DataType.Float, "dummy_features");
    var dream_layer = CNTK.CNTKLib.ElementTimes(dream_weights, dummy_features, "the_dream_layer");

    // combine the dream layer with the content and style layers
    var replacements = new Dictionary<CNTK.Variable, CNTK.Variable>() { { input.Arguments[0], dream_layer.Output } };
    var model = input.Clone(CNTK.ParameterCloningMethod.Freeze, replacements);

    // return the finished model
    var all_outputs = new List<CNTK.Variable>() { dream_layer };
    all_outputs.AddRange(model.Outputs);
    return CNTK.Function.Combine(all_outputs, name: "overall_model");
}
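A minimal usage sketch; the image dimensions are placeholders, and baseModel stands in for any CNTK.Function whose first argument is the image input, such as the content-and-styles model built above:

    CNTK.Function baseModel = /* content-and-styles model built elsewhere */ null;
    var contentImage = new float[512 * 512 * 3]; // flattened image pixels (placeholder)
    var model = baseModel.DreamLayer(contentImage, 512, 512);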
/// <summary>
/// Add an embedding layer to the neural network.
/// </summary>
/// <param name="input">The neural network to expand.</param>
/// <param name="embeddingDimensions">The number of embedding dimensions to create.</param>
/// <returns>The neural network with the embedding layer added.</returns>
public static CNTK.Variable Embedding(
    this CNTK.Variable input,
    int embeddingDimensions)
{
    var weight_shape = new int[] { embeddingDimensions, CNTK.NDShape.InferredDimension };
    var E = new CNTK.Parameter(
        weight_shape,
        CNTK.DataType.Float,
        CNTK.CNTKLib.GlorotUniformInitializer(),
        NetUtil.CurrentDevice);
    return CNTK.CNTKLib.Times(E, input);
}
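A hedged usage sketch (the vocabulary size and embedding width are made-up values): mapping one-hot encoded words onto a dense 32-dimensional vector space.

    var words = CNTK.Variable.InputVariable(new int[] { 10000 }, CNTK.DataType.Float, "words");
    var embedded = words.Embedding(32);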
/// <summary>
/// Add a 2D convolution layer with ReLU activation to a neural network.
/// </summary>
/// <param name="input">The neural network to expand.</param>
/// <param name="num_output_channels">The number of output channels (filters) to use.</param>
/// <param name="filter_shape">The shape of the convolution filters.</param>
/// <param name="device">The device to store the parameters on.</param>
/// <param name="use_padding">Set to true to use padding.</param>
/// <param name="use_bias">Set to true to add a bias term.</param>
/// <param name="outputName">The name of the layer.</param>
/// <returns>The neural network with the convolution layer added.</returns>
static public CNTK.Function Convolution2DWithReLU(
    CNTK.Variable input,
    int num_output_channels,
    int[] filter_shape,
    CNTK.DeviceDescriptor device,
    bool use_padding = false,
    bool use_bias = true,
    string outputName = "")
{
    var convolution_map_size = new int[] { filter_shape[0], filter_shape[1], CNTK.NDShape.InferredDimension, num_output_channels };
    var W = new CNTK.Parameter(
        CNTK.NDShape.CreateNDShape(convolution_map_size),
        CNTK.DataType.Float,
        CNTK.CNTKLib.GlorotUniformInitializer(
            CNTK.CNTKLib.DefaultParamInitScale,
            CNTK.CNTKLib.SentinelValueForInferParamInitRank,
            CNTK.CNTKLib.SentinelValueForInferParamInitRank,
            1),
        device,
        outputName + "_W");
    var result = CNTK.CNTKLib.Convolution(
        W,
        input,
        CNTK.NDShape.CreateNDShape(new int[] { 1 }) /* strides */,
        new CNTK.BoolVector(new bool[] { true }) /* sharing */,
        new CNTK.BoolVector(new bool[] { use_padding }));
    if (use_bias)
    {
        var b = new CNTK.Parameter(
            CNTK.NDShape.CreateNDShape(new int[] { 1, 1, CNTK.NDShape.InferredDimension }),
            0.0f,
            device,
            outputName + "_b");
        result = CNTK.CNTKLib.Plus(result, b);
    }
    result = CNTK.CNTKLib.ReLU(result, outputName);
    return result;
}
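For example (the sizes here are assumptions), a padded 3x3 convolution with 32 output channels over a 28x28 grayscale image:

    var image = CNTK.Variable.InputVariable(new int[] { 28, 28, 1 }, CNTK.DataType.Float, "image");
    var conv1 = Convolution2DWithReLU(image, 32, new int[] { 3, 3 }, NetUtil.CurrentDevice, use_padding: true, outputName: "conv1");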
/// <summary>
/// Add a convolution transpose layer to the network.
/// </summary>
/// <param name="input">The neural network to extend</param>
/// <param name="filterShape">The shape of the filters to use</param>
/// <param name="numberOfFilters">The number of filters to use</param>
/// <param name="activation">The activation function to use</param>
/// <param name="padding">Set to true to use padding</param>
/// <param name="strides">The stride lengths to use</param>
/// <param name="bias">Set to true to introduce bias</param>
/// <param name="outputShape">The output shape to generate</param>
/// <param name="reductionRank">The reduction rank; must be 1</param>
/// <param name="dilation">The dilation factors to use</param>
/// <param name="maxTempMemSizeInSamples">The maximum temporary memory to use, in samples</param>
/// <param name="name">The name of the layer</param>
/// <returns>The neural network with the convolution transpose layer added</returns>
static public CNTK.Variable ConvolutionTranspose(
    this CNTK.Variable input,
    int[] filterShape,
    int numberOfFilters,
    Func<CNTK.Variable, CNTK.Function> activation = null,
    bool padding = true,
    int[] strides = null,
    bool bias = true,
    int[] outputShape = null,
    uint reductionRank = 1,
    int[] dilation = null,
    uint maxTempMemSizeInSamples = 0,
    string name = "")
{
    if (strides == null)
    {
        strides = new int[] { 1 };
    }
    var sharing = PadToShape(filterShape, true);
    var thePadding = PadToShape(filterShape, padding);
    if (reductionRank != 1)
    {
        throw new NotSupportedException("reduction_rank should be 1");
    }
    thePadding = ConcatenateArrays(thePadding, new bool[] { false });
    if (dilation == null)
    {
        dilation = PadToShape(filterShape, 1);
    }
    var output_channels_shape = new int[] { numberOfFilters };
    var kernel_shape = ConcatenateArrays(filterShape, output_channels_shape, new int[] { CNTK.NDShape.InferredDimension });
    var output_full_shape = outputShape;
    if (output_full_shape != null)
    {
        output_full_shape = ConcatenateArrays(outputShape, output_channels_shape);
    }
    var filter_rank = filterShape.Length;
    var init = CNTK.CNTKLib.GlorotUniformInitializer(
        CNTK.CNTKLib.DefaultParamInitScale,
        CNTK.CNTKLib.SentinelValueForInferParamInitRank,
        CNTK.CNTKLib.SentinelValueForInferParamInitRank,
        1);
    var W = new CNTK.Parameter(kernel_shape, CNTK.DataType.Float, init, NetUtil.CurrentDevice, "W");
    var r = CNTK.CNTKLib.ConvolutionTranspose(
        convolutionMap: W,
        operand: input,
        strides: strides,
        sharing: new CNTK.BoolVector(sharing),
        autoPadding: new CNTK.BoolVector(thePadding),
        outputShape: output_full_shape,
        dilation: dilation,
        reductionRank: reductionRank,
        maxTempMemSizeInSamples: maxTempMemSizeInSamples);
    if (bias)
    {
        var b_shape = ConcatenateArrays(MakeOnesArray(filterShape.Length), output_channels_shape);
        var b = new CNTK.Parameter(b_shape, 0.0f, NetUtil.CurrentDevice, "B");
        r = CNTK.CNTKLib.Plus(r, b);
    }
    if (activation != null)
    {
        r = activation(r);
    }
    return r;
}
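A hedged usage sketch (all sizes are assumptions): upsampling a 7x7x64 feature map to 14x14x32, as in a decoder, by using a stride of 2:

    var featureMap = CNTK.Variable.InputVariable(new int[] { 7, 7, 64 }, CNTK.DataType.Float, "features");
    var upsampled = featureMap.ConvolutionTranspose(
        filterShape: new int[] { 3, 3 },
        numberOfFilters: 32,
        activation: v => CNTK.CNTKLib.ReLU(v),
        strides: new int[] { 2, 2 },
        outputShape: new int[] { 14, 14 });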