/// <summary>
/// Creates a composite CNTK minibatch source that streams images listed in a map file.
/// </summary>
/// <param name="shape">Target image shape; indexed here as [0]=height, [1]=width, [2]=channels — confirm against callers.</param>
/// <param name="start_index">First sample index to include in the generated map file</param>
/// <param name="sample_count">Number of samples to include</param>
/// <param name="prefix">Prefix used to name the generated map file</param>
/// <param name="is_training">When false, randomization is disabled so evaluation order is deterministic</param>
/// <param name="use_augmentations">When true, adds a random-side crop augmentation transform</param>
/// <param name="num_classes">Number of label classes fed to the image deserializer (default 2, matching the original hard-coded value)</param>
/// <returns>A configured composite minibatch source</returns>
CNTK.MinibatchSource create_minibatch_source(CNTK.NDShape shape, int start_index, int sample_count, string prefix, bool is_training = false, bool use_augmentations = false, int num_classes = 2) {
  var map_filename = create_map_txt_file_if_needed(start_index, sample_count, prefix);

  var transforms = new List<CNTK.CNTKDictionary>();
  if (use_augmentations) {
    // Random-side crop with side ratio drawn uniformly from [0.8, 1.0] ("uniRatio").
    // The remaining tuple arguments keep CNTK's defaults for crop size, area ratio and aspect ratio.
    var randomSideTransform = CNTK.CNTKLib.ReaderCrop("RandomSide",
      new Tuple<int, int>(0, 0),
      new Tuple<float, float>(0.8f, 1.0f),
      new Tuple<float, float>(0.0f, 0.0f),
      new Tuple<float, float>(1.0f, 1.0f),
      "uniRatio");
    transforms.Add(randomSideTransform);
  }

  // Always scale to the requested spatial size/channel count after any cropping.
  var scaleTransform = CNTK.CNTKLib.ReaderScale(width: shape[1], height: shape[0], channels: shape[2]);
  transforms.Add(scaleTransform);

  // Streams are exposed as "features" (images) and "labels" (one-hot over num_classes).
  var imageDeserializer = CNTK.CNTKLib.ImageDeserializer(map_filename, "labels", (uint)num_classes, "features", transforms);
  var minibatchSourceConfig = new CNTK.MinibatchSourceConfig(new CNTK.DictionaryVector() { imageDeserializer });
  if (!is_training) {
    // Zero randomization windows => samples are served in map-file order.
    minibatchSourceConfig.randomizationWindowInChunks = 0;
    minibatchSourceConfig.randomizationWindowInSamples = 0;
  }
  return CNTK.CNTKLib.CreateCompositeMinibatchSource(minibatchSourceConfig);
}
/// <summary>
/// Gathers the samples selected by indices[indices_begin..indices_end) from a flat
/// CPU buffer and wraps them in a read-only CNTK batch on the given device.
/// </summary>
/// <param name="shape">Shape of a single sample in the batch</param>
/// <param name="src">Flat source buffer holding all samples</param>
/// <param name="indices">Index table selecting which samples to gather</param>
/// <param name="indices_begin">Inclusive start position within indices</param>
/// <param name="indices_end">Exclusive end position within indices</param>
/// <param name="device">Device the batch value is created on</param>
/// <returns>A read-only batch Value built from the gathered data</returns>
public static CNTK.Value get_tensors(CNTK.NDShape shape, float[] src, int[] indices, int indices_begin, int indices_end, CNTK.DeviceDescriptor device) {
  var gathered = Util.get_minibatch_data_CPU(shape, src, indices, indices_begin, indices_end);
  return CNTK.Value.CreateBatch(shape, gathered, device, true);
}
/// <summary>
/// Appends a pooling layer to the network.
/// </summary>
/// <param name="input">The network variable to pool over</param>
/// <param name="poolingType">Max or average pooling</param>
/// <param name="windowShape">Shape of the pooling window</param>
/// <param name="strides">Stride lengths per dimension</param>
/// <returns>The network with the pooling operation applied</returns>
public static CNTK.Variable Pooling(
  this CNTK.Variable input,
  CNTK.PoolingType poolingType,
  CNTK.NDShape windowShape,
  int[] strides) => CNTK.CNTKLib.Pooling(input, poolingType, windowShape, strides);
/// <summary>
/// Copies the contiguous slice [indices_begin, indices_end) out of a flat CPU buffer.
/// The shape parameter is unused here; it keeps the signature parallel with the
/// other get_minibatch_data_CPU overloads.
/// </summary>
/// <param name="shape">Unused; present for overload symmetry</param>
/// <param name="src">Flat source buffer</param>
/// <param name="indices_begin">Inclusive start offset into src</param>
/// <param name="indices_end">Exclusive end offset into src</param>
/// <returns>A freshly allocated copy of the requested slice</returns>
static float[] get_minibatch_data_CPU(CNTK.NDShape shape, float[] src, int indices_begin, int indices_end) {
  // TODO: it would be nice if we could avoid this copy.
  var slice_length = indices_end - indices_begin;
  var slice = new float[slice_length];
  for (var i = 0; i < slice_length; i++) {
    slice[i] = src[indices_begin + i];
  }
  return slice;
}
/// <summary>
/// Gathers one float per selected index from a flat CPU buffer:
/// result[i] = src[indices[indices_begin + i]].
/// </summary>
/// <param name="shape">Shape of a single sample; unused by the copy itself (see NOTE below)</param>
/// <param name="src">Flat source buffer the indices point into</param>
/// <param name="indices">Index table selecting which elements to gather</param>
/// <param name="indices_begin">Inclusive start position within indices</param>
/// <param name="indices_end">Exclusive end position within indices</param>
/// <returns>A new array with one gathered float per selected index</returns>
static float[] get_minibatch_data_CPU(CNTK.NDShape shape, float[] src, int[] indices, int indices_begin, int indices_end) {
  // NOTE(review): this copies exactly ONE float per index, so the output is only
  // sized correctly for CreateBatch when shape.TotalSize == 1 (scalar targets).
  // The original computed shape.TotalSize into an unused local, suggesting a
  // per-row copy may have been intended — confirm against callers before
  // using this with multi-element sample shapes.
  var num_indices = indices_end - indices_begin;
  var result = new float[num_indices];
  var row_index = 0;
  for (var index = indices_begin; index != indices_end; index++) {
    result[row_index++] = src[indices[index]];
  }
  return result;
}
/// <summary>
/// Wraps the row buffers src[indices_begin..indices_end) in read-only CPU
/// NDArrayViews of the given shape, one view per row.
/// </summary>
/// <param name="shape">Shape applied to each row buffer</param>
/// <param name="src">Jagged source array; each element is one sample's buffer</param>
/// <param name="indices_begin">Inclusive start row index into src</param>
/// <param name="indices_end">Exclusive end row index into src</param>
/// <returns>One CPU NDArrayView per selected row</returns>
static CNTK.NDArrayView[] get_minibatch_data_CPU(CNTK.NDShape shape, float[][] src, int indices_begin, int indices_end) {
  var views = new CNTK.NDArrayView[indices_end - indices_begin];
  for (var i = 0; i < views.Length; i++) {
    // Read-only view over the existing buffer (last argument = readOnly: true).
    views[i] = new CNTK.NDArrayView(shape, src[indices_begin + i], CNTK.DeviceDescriptor.CPUDevice, true);
  }
  return views;
}
/// <summary>
/// Reshapes the network's current tensor to a new shape.
/// </summary>
/// <param name="input">The network variable to reshape</param>
/// <param name="newShape">The target shape</param>
/// <returns>The network with the reshape operation applied</returns>
public static CNTK.Variable Reshape(
  this CNTK.Variable input,
  CNTK.NDShape newShape) => CNTK.CNTKLib.Reshape(input, newShape);