Example #1
        /// <summary>
        /// Maps <paramref name="fn"/> over the tensors unpacked from `elems` along dimension 0.
        /// </summary>
        /// <param name="fn">The function to apply to each tensor unpacked along dimension 0.</param>
        /// <param name="elems">A tensor to be unpacked along its first dimension.</param>
        /// <param name="dtype">The output type of <paramref name="fn"/>; defaults to the input type when invalid.</param>
        /// <param name="parallel_iterations">Number of iterations allowed to run in parallel.</param>
        /// <param name="back_prop">Enables support for back propagation.</param>
        /// <param name="swap_memory">Enables GPU-CPU memory swapping.</param>
        /// <param name="infer_shape">Whether to test and enforce consistent output shapes.</param>
        /// <param name="name">Optional name prefix for the returned tensors.</param>
        /// <returns>A tensor or (possibly nested) sequence of tensors.</returns>
        public static Tensor map_fn(Func <Tensor, Tensor> fn,
                                    Tensor elems,
                                    TF_DataType dtype       = TF_DataType.DtInvalid,
                                    int parallel_iterations = 10,
                                    bool back_prop          = true,
                                    bool swap_memory        = false,
                                    bool infer_shape        = true,
                                    string name             = null)
        {
            bool input_is_sequence = nest.is_sequence(elems);

            Tensor[] input_flatten(Tensor x) => input_is_sequence ? nest.flatten(x).ToArray() : new[] { x };
            Tensor input_pack(Tensor[] x) => input_is_sequence ? (Tensor)nest.pack_sequence_as(elems, x) : x[0];

            bool output_is_sequence;
            Func <Tensor, Tensor[]> output_flatten;
            Func <Tensor[], Tensor> output_pack;

            if (dtype == TF_DataType.DtInvalid)
            {
                output_is_sequence = input_is_sequence;
                output_flatten     = input_flatten;
                output_pack        = input_pack;
            }
            else
            {
                output_is_sequence = nest.is_sequence(dtype);
                output_flatten     = (x) => output_is_sequence ? nest.flatten(x).ToArray() : new[] { x };
                output_pack        = (x) => output_is_sequence ? (Tensor)nest.pack_sequence_as(dtype, x) : x[0];
            }

            var elems_flat = input_flatten(elems);

            return(tf_with(ops.name_scope(name, "map", elems_flat), delegate
            {
                //if in_graph_mode:
                //# Any get_variable calls in fn will cache the first call locally
                //# and not issue repeated network I/O requests for each iteration.
                //varscope = vs.get_variable_scope()
                //varscope_caching_device_was_none = False
                //if varscope.caching_device is None:
                //  # TODO(ebrevdo): Change to using colocate_with here and in other
                //  # methods.
                //  varscope.set_caching_device(lambda op: op.device)
                //  varscope_caching_device_was_none = True

                elems_flat = elems_flat.Select(elem => ops.convert_to_tensor(elem, name: "elem"))
                             .ToArray();

                // Only infer dtype from the inputs when the caller didn't supply one;
                // output_pack above captures `dtype`, so an unconditional overwrite
                // would clobber a user-provided value.
                if (dtype == TF_DataType.DtInvalid)
                {
                    dtype = elems_flat.Select(elem => elem.dtype).First();
                }
                var dtype_flat = new[] { dtype };

                // Convert elems to tensor array. n may be known statically.
                var static_shape = elems_flat[0].shape;

                var n = static_shape[0];

                // TensorArrays are always flat
                var elems_ta = elems_flat.Select(elem => new TensorArray(dtype: elem.dtype,
                                                                         size: ops.convert_to_tensor(n),
                                                                         dynamic_size: false,
                                                                         infer_shape: true)).ToArray();

                // Unpack elements
                var elems_ta_1 = new List <TensorArray>();
                foreach (var (elem_ta, elem) in zip(elems_ta, elems_flat))
                {
                    elems_ta_1.Add(elem_ta.unstack(elem));
                }

                elems_ta = elems_ta_1.ToArray();

                var i = constant_op.constant(0);

                var accs_ta = dtype_flat.Select(dt => new TensorArray(dtype: dt,
                                                                      size: ops.convert_to_tensor(n),
                                                                      dynamic_size: false,
                                                                      infer_shape: infer_shape)).ToArray();


                // Loop body: read the item.I-th slice of each input, apply fn,
                // and accumulate the flattened results.
                BodyItem compute(BodyItem item)
                {
                    var packed_values = input_pack(elems_ta.Select(elem_ta => elem_ta.read(item.I)).ToArray());
                    var packed_fn_values = fn(packed_values);
                    //nest.assert_same_structure(dtype or elems, packed_fn_values)

                    var flat_fn_values = output_flatten(packed_fn_values);
                    for (int j = 0; j < item.Accs_ta.Length; j++)
                    {
                        // Keep the TensorArray returned by write so the update persists.
                        item.Accs_ta[j] = item.Accs_ta[j].write(item.I, flat_fn_values[j]);
                    }

                    return new BodyItem(item.I + 1, item.Accs_ta);
                }

                var r_a = control_flow_ops.while_loop(
                    (x) => x.I < n,
                    compute,
                    new BodyItem(i, accs_ta),
                    parallel_iterations: parallel_iterations,
                    back_prop: back_prop,
                    swap_memory: swap_memory,
                    maximum_iterations: tf.constant(n));
                var results_flat = r_a.Accs_ta.Select(r => r.stack()).ToArray();

                var n_static = new Dimension(tensor_shape.dimension_value(elems_flat[0].TensorShape.with_rank_at_least(1).dims[0]));

                foreach (var elem in elems_flat.Skip(1))
                {
                    n_static.merge_with(new Dimension(tensor_shape.dimension_value(elem.TensorShape.with_rank_at_least(1).dims[0])));
                }

                foreach (Tensor r in results_flat)
                {
                    r.set_shape(new TensorShape(n_static).concatenate(r.dims.Skip(1).ToArray()));
                }

                // todo get working when the above caching_device is fixed
                //if (in_graph_mode && varscope_caching_device_was_none) {
                //    varscope.set_caching_device(None);
                //}

                return output_pack(results_flat);
            }));
        }
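A minimal usage sketch for the map_fn above (not part of the original example), assuming the TensorFlow.NET-style bindings used throughout these snippets; tf.constant and tf.square are taken from that API surface.

        // Hypothetical usage: apply fn to each slice of `elems` along dimension 0.
        var elems = tf.constant(new[] { 1, 2, 3, 4, 5, 6 });
        var squares = map_fn(x => tf.square(x), elems);
        // squares == [1, 4, 9, 16, 25, 36]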
Example #2
        public static TensorShape constant_value_as_shape(Tensor tensor)
        {
            bool hasattr(Graph property, string attr)
            {
                // Reflection check: does the graph type expose a property named `attr`?
                return(property.GetType().GetProperty(attr) != null);
            }

            if (tensor.GetType() == typeof(EagerTensor))
            {
                // Collect dimensions without writing past the end of a fixed array;
                // -1 (unknown) passes through unchanged.
                var dims = new List<int>();
                foreach (int dim in tensor.numpy())
                {
                    dims.Add(dim);
                }
                return(new TensorShape(dims.ToArray()));
            }

            if (tensor.TensorShape.ndim == 0)
            {
                var value_ = constant_value(tensor);
                if (value_ == null)
                {
                    throw new ValueError(
                              @"Received a scalar with unknown value as shape; require a statically
known scalar with value '-1' to describe an unknown shape.");
                }
                if (value_ != -1)
                {
                    throw new ValueError(
                              String.Format(@"Received a scalar value {0} as shape; require a statically known
scalar with value '-1' to describe an unknown shape.", value_));
                }
                return(tensor.TensorShape.unknown_shape(-1));
            }

            var shape = tensor.TensorShape.with_rank(1);

            if (shape == new TensorShape(new int[] { 0 }))
            {
                // A shape tensor with zero entries describes a scalar shape.
                return(new TensorShape(new int[] {}));
            }
            else if (tensor.op.type == "Cast")
            {
                var pre_cast = constant_value_as_shape(tensor.op.inputs[0]);
                if (pre_cast.dims == null)
                {
                    return(pre_cast);
                }
                var cast_dtype = dtypes.as_dtype((Type)tensor.op.get_attr("DstT"));
                if (!Array.Exists(new [] { dtypes.int32, dtypes.int64 }, cast_dtype_ => cast_dtype_ == cast_dtype))
                {
                    return(tensor.TensorShape.unknown_shape(shape.dims[0]));
                }

                // pre_cast.as_list() already reports unknown dimensions as -1,
                // so its values can be collected directly.
                var x_ = new List<int>();
                foreach (var x in pre_cast.as_list())
                {
                    x_.Add(x);
                }
                var dest_dtype_shape_array = np.array(x_.ToArray()).astype(cast_dtype.as_numpy_dtype());

                var y_ = new List<int>();
                foreach (int y in dest_dtype_shape_array)
                {
                    y_.Add(y >= 0 ? y : -1); // negative values mean unknown
                }
                return(new TensorShape(y_.ToArray()));
            }
            else if (tensor.op.type == "Shape")
            {
                return(tensor.op.inputs[0].shape);
            }
            else if (tensor.op.type == "Pack")
            {
                var ret_ = new TensorShape(new int[] {});
                if ((int)tensor.op.get_attr("axis") != 0)
                {
                    throw new ValueError(String.Format(
                                             @"Since rank 1 inputs are expected, Pack's axis: {0} must be 0, otherwise it
would not be rank 1.", tensor.op.get_attr("axis")));
                }
                foreach (Tensor pack_input in tensor.op.inputs)
                {
                    var pack_input_val = constant_value(pack_input);
                    Dimension new_dim;
                    // Test for null before comparing; a statically unknown (null)
                    // value must not reach the < operator.
                    if (pack_input_val == null || pack_input_val < 0)
                    {
                        new_dim = new Dimension(-1);
                    }
                    else
                    {
                        new_dim = new Dimension(pack_input_val);
                    }
                    ret_ = ret_.concatenate(new int[] { new_dim });
                }
                return(ret_);
            }
            else if (tensor.op.type == "Concat")
            {
                var ret_ = new TensorShape(new int[] {});

                var inputlist_ = new ArraySegment <Tensor>(tensor.op.inputs, 1,
                                                           tensor.op.inputs.Length - 1);
                foreach (var concat_input in inputlist_)
                {
                    ret_ = ret_.concatenate(constant_value_as_shape(concat_input));
                }
                return(ret_);
            }
            else if (tensor.op.type == "StridedSlice")
            {
                try
                {
                    var begin   = constant_value(tensor.op.inputs[1]);
                    var end     = constant_value(tensor.op.inputs[2]);
                    var strides = constant_value(tensor.op.inputs[3]);
                    // Proceed only when begin, end and strides are all statically known.
                    if (new [] { begin, end, strides }.All(x => x != null))
                    {
                        begin   = begin[0];
                        end     = end[0];
                        strides = strides[0];
                        var begin_mask = tensor.op.get_attr("begin_mask");
                        if ((int)begin_mask == 1)
                        {
                            begin = null;
                        }
                        var end_mask = tensor.op.get_attr("end_mask");
                        if ((int)end_mask == 1)
                        {
                            end = null;
                        }

                        var ellipsis_mask    = tensor.op.get_attr("ellipsis_mask");
                        var new_axis_mask    = tensor.op.get_attr("new_axis_mask");
                        var shrink_axis_mask = tensor.op.get_attr("shrink_axis_mask");

                        // Mirrors the Python check: no ellipsis/new-axis/shrink-axis bits set,
                        // and each of begin_mask/end_mask is either unset or exactly 1.
                        bool valid_attributes =
                            (int)ellipsis_mask == 0 && (int)new_axis_mask == 0 &&
                            (int)shrink_axis_mask == 0 &&
                            ((int)begin_mask == 0 || (int)begin_mask == 1) &&
                            ((int)end_mask == 0 || (int)end_mask == 1);
                        if (valid_attributes)
                        {
                            // Emulate Python's `prev[begin:end:strides]` slice over the
                            // input's dimensions; a null bound behaves like slice(None),
                            // i.e. "from the start" / "to the end" (positive strides assumed).
                            var prev_ = constant_value_as_shape(tensor.op.inputs[0]).dims;
                            int start = begin == null ? 0 : (int)begin;
                            int stop  = end == null ? prev_.Length : (int)end;
                            int step  = strides == null ? 1 : (int)strides;
                            var prev  = new List<int>();
                            for (int idx = start; idx < stop && idx < prev_.Length; idx += step)
                            {
                                prev.Add(prev_[idx]);
                            }
                            return(new TensorShape(prev.ToArray()));
                        }
                    }
                } catch (Exception ex) when (ex is ValueError || ex is TypeError)
                {
                    // Fall through to the generic handling below, mirroring
                    // Python's `except (ValueError, TypeError): pass`.
                }
            }
            else if (tensor.op.type == "Placeholder" &&
                     tensor.op.graph.building_function &&
                     hasattr(tensor.op.graph, "internal_captures"))
            {
                int i = 0;
                foreach (Tensor capture in tensor.op.graph.internal_captures())
                {
                    if (capture.GetType() == typeof(Tensor))
                    {
                        var external_capture = tensor.op.graph.external_captures()[i];
                        return(constant_value_as_shape(external_capture));
                    }

                    i++;
                }
            }

            var ret   = tensor.TensorShape.unknown_shape(shape.dims[0]);
            var value = constant_value(tensor);

            if (value != null)
            {
                // Merge the statically known values into the result shape.
                var d_ = new List<int>();
                foreach (int d in value)
                {
                    d_.Add(d >= 0 ? d : -1); // -1 == None / unknown
                }
                ret = ret.merge_with(new TensorShape(d_.ToArray()));
            }
            return(ret);
        }
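A minimal usage sketch (an assumption, not part of the original example): constant_value_as_shape tries to statically recover the shape that a shape-valued tensor describes, e.g. the output of a "Shape" op. tf.placeholder and tf.shape are taken from the same TF.NET-style API surface as above.

        // Hypothetical usage: the "Shape" branch above returns the input's static shape.
        var x = tf.placeholder(tf.int32, shape: new TensorShape(2, 3));
        var s = tf.shape(x);                            // a shape-valued tensor
        var static_shape = constant_value_as_shape(s);  // -> TensorShape (2, 3)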
Example #3
        public static Tensor scan(
            Func <Tensor, Tensor, Tensor> fn,
            Tensor elems,
            Tensor initializer      = null,
            int parallel_iterations = 10,
            bool back_prop          = true,
            bool swap_memory        = false,
            bool infer_shape        = true,
            bool reverse            = false,
            string name             = null)
        {
            bool input_is_sequence = nest.is_sequence(elems);

            Tensor[] input_flatten(Tensor x) => input_is_sequence ? nest.flatten(x).ToArray() : new[] { x };
            Tensor input_pack(Tensor[] x) => input_is_sequence ? (Tensor)nest.pack_sequence_as(elems, x) : x[0];

            bool output_is_sequence;
            Func <Tensor, Tensor[]> output_flatten;
            Func <Tensor[], Tensor> output_pack;

            if (initializer == null)
            {
                output_is_sequence = input_is_sequence;
                output_flatten     = input_flatten;
                output_pack        = input_pack;
            }
            else
            {
                output_is_sequence = nest.is_sequence(initializer);
                output_flatten     = (x) => output_is_sequence ? nest.flatten(x).ToArray() : new[] { x };
                output_pack        = (x) => output_is_sequence ? (Tensor)nest.pack_sequence_as(initializer, x) : x[0];
            }

            var elems_flat = input_flatten(elems);

            // Graph mode is the opposite of eager execution.
            bool in_graph_mode = !tf.context.executing_eagerly();

            return(tf_with(ops.name_scope(name, "scan", new { elems_flat }), scope =>
            {
                if (in_graph_mode)
                {
                    // todo tf.net doesn't expose .caching_device
                    //// Any get_variable calls in fn will cache the first call locally
                    //// and not issue repeated network I/O requests for each iteration.
                    //var varscope = variable_scope.get_variable_scope();
                    //bool varscope_caching_device_was_none = false;
                    //if (varscope.caching_device = null)
                    //{
                    //    //      varscope.set_caching_device(lambda op: op.device)
                    //    //      varscope_caching_device_was_none = True
                    //}
                }

                elems_flat = elems_flat.Select(elem => ops.convert_to_tensor(elem, name: "elem")).ToArray();

                var n = tensor_shape.dimension_value(elems_flat[0].shape[0]);

                // todo python had the below but dimension_value returns int which can't be null
                //if (n == null)
                //{
                //    n = array_ops.shape(elems_flat[0])[0];
                //}

                var elems_ta = elems_flat.Select(elem => new TensorArray(
                                                     elem.dtype,
                                                     size: tf.constant(n),
                                                     dynamic_size: false,
                                                     element_shape: elem.shape.Skip(1).ToArray(),
                                                     infer_shape: true)).ToList();

                // TensorArray.unstack returns the updated array; keep the result
                // so the unpacked elements are actually used below.
                for (int index = 0; index < elems_ta.Count; index++)
                {
                    elems_ta[index] = elems_ta[index].unstack(elems_flat[index]);
                }

                Tensor[] a_flat;
                int i;
                if (initializer == null)
                {
                    a_flat = elems_ta.Select(elem => elem.read(tf.constant(reverse ? n - 1 : 0))).ToArray();
                    i = 1;
                }
                else
                {
                    Tensor[] initializer_flat = output_flatten(initializer);
                    a_flat = initializer_flat.Select(init => ops.convert_to_tensor(init)).ToArray();
                    i = 0;
                }

                var accs_ta = a_flat.Select(init => new TensorArray(
                                                dtype: init.dtype,
                                                size: tf.constant(n),
                                                element_shape: infer_shape ? init.shape : null,
                                                dynamic_size: false,
                                                infer_shape: infer_shape)).ToArray();

                if (initializer == null)
                {
                    for (int index = 0; index < accs_ta.Length; index++)
                    {
                        // Keep the TensorArray returned by write so the update persists.
                        accs_ta[index] = accs_ta[index].write(tf.constant(reverse ? n - 1 : 0), a_flat[index]);
                    }
                }

                // Loop body: read the current element, fold it into the accumulator
                // with fn, and record the accumulator at this step.
                BodyItem compute(BodyItem item)
                {
                    var packed_elems = input_pack(elems_ta.Select(elem_ta => elem_ta.read(item.I)).ToArray());
                    var packed_a = output_pack(item.A_Flat);
                    var a_out = fn(packed_a, packed_elems);

                    var flat_a_out = output_flatten(a_out);
                    for (int j = 0; j < item.Accs_ta.Length; j++)
                    {
                        // Keep the TensorArray returned by write so the update persists.
                        item.Accs_ta[j] = item.Accs_ta[j].write(item.I, flat_a_out[j]);
                    }

                    var next_i = reverse ? item.I - 1 : item.I + 1;
                    return new BodyItem(next_i, flat_a_out, item.Accs_ta);
                }

                int initial_i;
                Func <BodyItem, Tensor> condition;
                if (reverse)
                {
                    initial_i = n - 1 - i;
                    condition = x => x.I >= 0;
                }
                else
                {
                    initial_i = i;
                    condition = x => x.I < n;
                }

                BodyItem bodyItem =
                    control_flow_ops.while_loop(
                        condition,
                        compute,
                        new BodyItem(tf.constant(initial_i), a_flat, accs_ta),
                        parallel_iterations: parallel_iterations,
                        back_prop: back_prop,
                        swap_memory: swap_memory,
                        maximum_iterations: tf.constant(n));

                var results_flat = bodyItem.Accs_ta.Select(r => r.stack()).ToArray();

                var n_static = new Dimension(tensor_shape.dimension_value(elems_flat[0].TensorShape.with_rank_at_least(1).dims[0]));

                foreach (var elem in elems_flat.Skip(1))
                {
                    n_static.merge_with(new Dimension(tensor_shape.dimension_value(elem.TensorShape.with_rank_at_least(1).dims[0])));
                }

                foreach (Tensor r in results_flat)
                {
                    r.set_shape(new TensorShape(n_static).concatenate(r.dims.Skip(1).ToArray()));
                }

                // todo get working when the above caching_device is fixed
                //if (in_graph_mode && varscope_caching_device_was_none) {
                //    varscope.set_caching_device(None);
                //}

                return output_pack(results_flat);
            }));
        }
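A minimal usage sketch for the scan above (not part of the original example), assuming the same bindings; tf.add is taken from that API surface.

        // Hypothetical usage: running sum, i.e. the cumulative sum of `elems`.
        var elems = tf.constant(new[] { 1, 2, 3, 4, 5, 6 });
        var sums = scan((a, x) => tf.add(a, x), elems);
        // sums == [1, 3, 6, 10, 15, 21]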