Code example #1
        /// <summary>
        /// Computes a 2-D convolution given 4-D `input` and `filter` tensors.
        ///
        /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
        /// and a filter / kernel tensor of shape
        /// `[filter_height, filter_width, in_channels, out_channels]`, this op
        /// performs the following:
        ///
        /// 1. Flattens the filter to a 2-D matrix with shape
        ///    `[filter_height * filter_width * in_channels, output_channels]`.
        /// 2. Extracts image patches from the input tensor to form a *virtual*
        ///    tensor of shape `[batch, out_height, out_width,
        ///    filter_height * filter_width * in_channels]`.
        /// 3. For each patch, right-multiplies the filter matrix and the image patch
        ///    vector.
        /// </summary>
        /// <param name="parameters">Convolution settings: the input and filter tensors plus the strides, padding, data format and related attributes.</param>
        /// <returns>A 4-D tensor containing the convolution output.</returns>
        public static Tensor conv2d(Conv2dParams parameters)
        {
            if (tf.executing_eagerly())
            {
                var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
                                                            "Conv2D", parameters.Name,
                                                            null,
                                                            parameters.Input, parameters.Filter,
                                                            "strides", parameters.Strides,
                                                            "use_cudnn_on_gpu", parameters.UseCudnnOnGpu,
                                                            "padding", parameters.Padding,
                                                            "explicit_paddings", parameters.ExplicitPaddings,
                                                            "data_format", parameters.DataFormat,
                                                            "dilations", parameters.Dilations);

                return results[0];
            }

            var _op = tf.OpDefLib._apply_op_helper("Conv2D", name: parameters.Name, args: new
            {
                input             = parameters.Input,
                filter            = parameters.Filter,
                strides           = parameters.Strides,
                padding           = parameters.Padding,
                use_cudnn_on_gpu  = parameters.UseCudnnOnGpu,
                explicit_paddings = parameters.ExplicitPaddings,
                data_format       = parameters.DataFormat,
                dilations         = parameters.Dilations
            });

            return _op.outputs[0];
        }
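
For reference, here is a minimal usage sketch (not taken from the original source). It fills a Conv2dParams object using the property names visible above and runs a 3x3 convolution. The enclosing class name gen_nn_ops, the object-initializer style, the int[] type of Strides, and the tf.ones/Shape helpers are assumptions modeled on TensorFlow.NET conventions.

    // Minimal usage sketch (assumptions: the enclosing class is gen_nn_ops,
    // Strides is an int[] and tf.ones/Shape are available as in TensorFlow.NET).
    using static Tensorflow.Binding;
    using Tensorflow;

    var input  = tf.ones(new Shape(1, 28, 28, 3));   // [batch, in_height, in_width, in_channels]
    var filter = tf.ones(new Shape(3, 3, 3, 16));    // [filter_height, filter_width, in_channels, out_channels]

    var parameters = new Conv2dParams
    {
        Input      = input,
        Filter     = filter,
        Strides    = new int[] { 1, 1, 1, 1 },       // one stride per NHWC dimension
        Padding    = "SAME",
        DataFormat = "NHWC"
    };

    var output = gen_nn_ops.conv2d(parameters);      // expected output shape: [1, 28, 28, 16]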
Code example #2
 /// <summary>
 /// Computes a 2-D convolution given 4-D `input` and `filter` tensors.
 ///
 /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
 /// and a filter / kernel tensor of shape
 /// `[filter_height, filter_width, in_channels, out_channels]`, this op
 /// performs the following:
 ///
 /// 1. Flattens the filter to a 2-D matrix with shape
 ///    `[filter_height * filter_width * in_channels, output_channels]`.
 /// 2. Extracts image patches from the input tensor to form a *virtual*
 ///    tensor of shape `[batch, out_height, out_width,
 ///    filter_height * filter_width * in_channels]`.
 /// 3. For each patch, right-multiplies the filter matrix and the image patch
 ///    vector.
 /// </summary>
 /// <param name="parameters">Convolution settings: the input and filter tensors plus the strides, padding, data format and related attributes.</param>
 /// <returns>A 4-D tensor containing the convolution output.</returns>
 public static Tensor conv2d(Conv2dParams parameters)
 => tf.Context.ExecuteOp("Conv2D", parameters.Name, new ExecuteOpArgs(parameters.Input, parameters.Filter)
                         .SetAttributes(new
 {
     strides           = parameters.Strides,
     padding           = parameters.Padding,
     use_cudnn_on_gpu  = parameters.UseCudnnOnGpu,
     explicit_paddings = parameters.ExplicitPaddings,
     data_format       = parameters.DataFormat,
     dilations         = parameters.Dilations
 }));
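
The XML summary describes Conv2D as three steps: flatten the filter, extract image patches, then right-multiply each patch by the filter matrix. The sketch below is illustration only (a hypothetical helper, not library code); it works through the shape arithmetic for stride 1 and 'VALID' padding so the out_height and out_width in step 2 become concrete numbers.

    using System;

    // Shape bookkeeping for steps 1-3 of the summary, assuming stride 1 and
    // 'VALID' padding (hypothetical helper, not part of the library).
    static (int outHeight, int outWidth) Conv2dOutputShapeValid(
        int inHeight, int inWidth, int filterHeight, int filterWidth)
    {
        // With 'VALID' padding and stride 1, every output position corresponds
        // to one fully contained filter window.
        return (inHeight - filterHeight + 1, inWidth - filterWidth + 1);
    }

    // Example: a 28x28x3 input with a 3x3 filter and 16 output channels.
    // Step 1: the filter flattens to a [3 * 3 * 3, 16] = [27, 16] matrix.
    // Step 2: patches form a virtual [batch, 26, 26, 27] tensor (26 = 28 - 3 + 1).
    // Step 3: each 27-element patch vector is right-multiplied by the [27, 16]
    //         matrix, giving a [batch, 26, 26, 16] output.
    var (outH, outW) = Conv2dOutputShapeValid(28, 28, 3, 3);
    Console.WriteLine($"out_height={outH}, out_width={outW}");   // prints 26, 26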
Code example #3
        /// <summary>
        /// Computes a 2-D convolution given 4-D `input` and `filter` tensors.
        ///
        /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
        /// and a filter / kernel tensor of shape
        /// `[filter_height, filter_width, in_channels, out_channels]`, this op
        /// performs the following:
        ///
        /// 1. Flattens the filter to a 2-D matrix with shape
        ///    `[filter_height * filter_width * in_channels, output_channels]`.
        /// 2. Extracts image patches from the input tensor to form a *virtual*
        ///    tensor of shape `[batch, out_height, out_width,
        ///    filter_height * filter_width * in_channels]`.
        /// 3. For each patch, right-multiplies the filter matrix and the image patch
        ///    vector.
        /// </summary>
        /// <param name="parameters">Convolution settings: the input and filter tensors plus the strides, padding, data format and related attributes.</param>
        /// <returns>A 4-D tensor containing the convolution output.</returns>
        public static Tensor conv2d(Conv2dParams parameters)
        {
            var _op = _op_def_lib._apply_op_helper("Conv2D", name: parameters.Name, args: new
            {
                input             = parameters.Input,
                filter            = parameters.Filter,
                strides           = parameters.Strides,
                padding           = parameters.Padding,
                use_cudnn_on_gpu  = parameters.UseCudnnOnGpu,
                explicit_paddings = parameters.ExplicitPaddings,
                data_format       = parameters.DataFormat,
                dilations         = parameters.Dilations
            });

            return _op.outputs[0];
        }
Code example #4
        /// <summary>
        /// Computes the gradients of convolution with respect to the input.
        /// </summary>
        /// <param name="parameters">Backprop settings: the original input sizes, the filter, the output gradient (OutBackProp) and the convolution attributes.</param>
        /// <returns>The gradient of the convolution with respect to its input.</returns>
        public static Tensor conv2d_backprop_input(Conv2dParams parameters)
        {
            var _op = tf.OpDefLib._apply_op_helper("Conv2DBackpropInput", name: parameters.Name, args: new
            {
                input_sizes       = parameters.InputSizes,
                filter            = parameters.Filter,
                out_backprop      = parameters.OutBackProp,
                strides           = parameters.Strides,
                padding           = parameters.Padding,
                use_cudnn_on_gpu  = parameters.UseCudnnOnGpu,
                explicit_paddings = parameters.ExplicitPaddings,
                data_format       = parameters.DataFormat,
                dilations         = parameters.Dilations
            });

            return _op.outputs[0];
        }
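
Conv2DBackpropInput takes the shape of the forward-pass input (InputSizes), the forward-pass filter, and the gradient arriving from the convolution's output (OutBackProp), and returns the gradient with respect to the input, which has the same shape as that input. A hypothetical call could look like the sketch below; the property names come from the code above, while the gen_nn_ops class name, tf.constant, and the filter / grad_from_next_layer placeholders are assumptions.

    // Hypothetical sketch; `filter` and `grad_from_next_layer` stand in for
    // tensors produced elsewhere in the forward/backward pass.
    var grad_wrt_input = gen_nn_ops.conv2d_backprop_input(new Conv2dParams
    {
        InputSizes  = tf.constant(new int[] { 1, 28, 28, 3 }),  // shape of the original input
        Filter      = filter,                                    // forward-pass filter
        OutBackProp = grad_from_next_layer,                      // gradient w.r.t. the conv output
        Strides     = new int[] { 1, 1, 1, 1 },
        Padding     = "SAME",
        DataFormat  = "NHWC"
    });
    // grad_wrt_input has the same shape as the original input: [1, 28, 28, 3].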