/// <summary>
/// Binary 2-D convolution. Marshals the managed arrays to native TH tensors,
/// invokes the binary spatial-convolution kernel, and converts the result back.
/// </summary>
/// <param name="input">Input activations.</param>
/// <param name="weight">Binary (bit-packed) weights.</param>
/// <param name="bias">Optional bias; may be null.</param>
/// <param name="alpha">Per-output-channel scaling factors.</param>
/// <param name="kernel_size">[kW, kH].</param>
/// <param name="stride">[dW, dH].</param>
/// <param name="padding">[padW, padH].</param>
/// <returns>The convolution output as a new InternalArray.</returns>
internal static InternalArray bin_conv2d(InternalArray input, InternalArray weight, InternalArray bias, InternalArray alpha, int[] kernel_size, int[] stride, int[] padding) {
    var col_tensor = THWrapper.THFloatTensor_new(); // scratch im2col buffer owned here
    var output = THWrapper.THFloatTensor_new();
    var _alpha = alpha.ToTHTensor();
    var _input = input.ToTHTensor();
    var _weight = weight.ToTHTensor();
    // An empty tensor stands in for "no bias" so the native call always receives a valid handle.
    IntPtr _bias;
    if (bias == null) {
        _bias = THWrapper.THFloatTensor_new();
    } else {
        _bias = bias.ToTHTensor();
    }

    binop.THNN_Bin_SpatialConvolutionMM_updateOutput(_input, output, _weight, _bias, col_tensor, _alpha,
        kernel_size[0], kernel_size[1], stride[0], stride[1], padding[0], padding[1]);

    THWrapper.THFloatTensor_free(col_tensor);
    var ret = InternalArray.FromTHFloatTensor(output);
    THWrapper.THFloatTensor_free(output);
    THWrapper.THFloatTensor_free(_bias);
    THWrapper.THFloatTensor_free(_input);
    THWrapper.THFloatTensor_free(_weight); // fix: was never freed (native memory leak)
    THWrapper.THFloatTensor_free(_alpha);
    return ret;
}
/// <summary>
/// Computes one frame (one batch element) of the binary spatial convolution:
/// views the output as a 2-D (nOutputPlane x outputH*outputW) tensor over the
/// same storage, runs the binary GEMM against the unfolded input columns, and
/// optionally adds the bias via addmm with the ones vector.
/// </summary>
/// <param name="output">Destination float tensor (already sized).</param>
/// <param name="weight">Bit-packed int weight tensor.</param>
/// <param name="bias">Bias tensor handle; pass IntPtr.Zero (or a 0-dim tensor) for no bias.</param>
/// <param name="ones">Float tensor of ones used by addmm to broadcast the bias.</param>
/// <param name="bin_col">Bit-packed unfolded (im2col) input columns.</param>
/// <param name="alphas">Per-output-channel scaling factors.</param>
/// <param name="quantOutput">When true, binary_gemm_cpu produces quantized output.</param>
public static void THNN_Bin_SpatialConvolutionMM_updateOutput_frame(
    IntPtr output,   // float
    IntPtr weight,   // int (bit-packed)
    IntPtr bias,     // float
    IntPtr ones,     // float
    IntPtr bin_col,  // int (bit-packed)
    IntPtr alphas,   // float
    int kW, int kH, int dW, int dH, int padW, int padH,
    Int64 nInputPlane, Int64 inputWidth, Int64 inputHeight,
    Int64 nOutputPlane, Int64 outputWidth, Int64 outputHeight,
    bool quantOutput = false) {
    // Create a 2-D view (nOutputPlane x outputH*outputW) sharing the output's storage,
    // mirroring the C idiom THFloatTensor_newWithStorage2d(output->storage, output->storageOffset, ...).
    var strg = THWrapper.THFloatTensor_storage(output);
    var offset = THWrapper.THFloatTensor_storageOffset(output);
    IntPtr output2d = THWrapper.THFloatTensor_newWithStorage2d(strg, offset, nOutputPlane, (long)-1, outputWidth * outputHeight, (long)-1);

    THWrapper.THFloatTensor_zero(output2d);
    binop.binary_gemm_cpu(weight, bin_col, output2d,
        (int)nOutputPlane, (int)(kW * kH * nInputPlane), (int)(outputHeight * outputWidth),
        0, 1, 1, alphas, quantOutput);

    // fix: `bias != null` compared an IntPtr struct to null via lifted operators and was
    // therefore always true; the intended check is against the null native handle.
    if (bias != IntPtr.Zero && THWrapper.THFloatTensor_nDimension(bias) != 0) {
        THWrapper.THFloatTensor_addmm(output2d, 1, output2d, 1, bias, ones);
    }

    // Only the view is freed; the underlying storage still belongs to `output`.
    THWrapper.THFloatTensor_free(output2d);
}
/// <summary>
/// Fixed-point binary 2-D convolution. Like bin_conv2d, but the input arrives
/// in QIntData (quantized shorts), is widened to floats for the native kernel
/// (called with quantOutput = true), and the result is re-quantized back into
/// QIntData with Data set to null.
/// </summary>
/// <param name="input">Quantized input activations (QIntData populated).</param>
/// <param name="weight">Binary (bit-packed) weights.</param>
/// <param name="bias">Optional bias; may be null.</param>
/// <param name="alpha">Per-output-channel scaling factors.</param>
/// <param name="kernel_size">[kW, kH].</param>
/// <param name="stride">[dW, dH].</param>
/// <param name="padding">[padW, padH].</param>
/// <returns>Quantized convolution output (QIntData set, Data null).</returns>
internal static InternalArray fpbin_conv2d(InternalArray input, InternalArray weight, InternalArray bias, InternalArray alpha, int[] kernel_size, int[] stride, int[] padding) {
    var col_tensor = THWrapper.THFloatTensor_new(); // scratch im2col buffer owned here
    var output = THWrapper.THFloatTensor_new();
    var _alpha = alpha.ToTHTensor();

    // Widen the quantized input to a float clone so it can cross the native boundary.
    var cln = new InternalArray(input.Shape);
    for (int i = 0; i < cln.Data.Length; i++) {
        cln.Data[i] = input.QIntData[i];
    }
    var _input = cln.ToTHTensor();
    var _weight = weight.ToTHTensor();
    // An empty tensor stands in for "no bias" so the native call always receives a valid handle.
    IntPtr _bias;
    if (bias == null) {
        _bias = THWrapper.THFloatTensor_new();
    } else {
        _bias = bias.ToTHTensor();
    }

    binop.THNN_Bin_SpatialConvolutionMM_updateOutput(_input, output, _weight, _bias, col_tensor, _alpha,
        kernel_size[0], kernel_size[1], stride[0], stride[1], padding[0], padding[1], true);

    THWrapper.THFloatTensor_free(col_tensor);

    // Re-quantize: the kernel produced integral values in float slots.
    var ret = InternalArray.FromTHFloatTensor(output);
    ret.QIntData = new short[ret.Data.Length];
    for (int i = 0; i < ret.Data.Length; i++) {
        ret.QIntData[i] = (short)ret.Data[i];
    }
    ret.Data = null;

    THWrapper.THFloatTensor_free(output);
    THWrapper.THFloatTensor_free(_bias);
    THWrapper.THFloatTensor_free(_input);
    THWrapper.THFloatTensor_free(_weight); // fix: was never freed (native memory leak)
    THWrapper.THFloatTensor_free(_alpha);
    return ret;
}
/// <summary>
/// Fixed-point binary fully-connected layer. Mirrors the Python reference:
///   binop.encode_rows_cpu(input, bin_input)
///   binop.binary_gemm_cpu(bin_input, weight, output, m, n, k, 1, 0, 0, alpha)
///   output *= alpha.t().expand(output.shape)
/// Input arrives quantized in QIntData; alpha and the GEMM result are converted
/// to Q8.8 fixed point (scale 256) and multiplied with a >> 8 rescale.
/// </summary>
/// <param name="input">Quantized input, shape [m, n] (QIntData populated).</param>
/// <param name="weight">Binary weights, first dim k.</param>
/// <param name="bias">Must be null; bias addition is not implemented.</param>
/// <param name="alpha">Per-output scaling factors.</param>
/// <returns>Quantized output (QIntData set, Data null).</returns>
/// <exception cref="NotImplementedException">When bias is non-null.</exception>
internal static InternalArray fpbin_linear(InternalArray input, InternalArray weight, InternalArray bias, InternalArray alpha) {
    var m = input.Shape[0];
    var n = input.Shape[1];
    var k = weight.Shape[0];

    // Widen the quantized input to a float clone so it can cross the native boundary.
    var cln = new InternalArray(input.Shape);
    for (int i = 0; i < cln.Data.Length; i++) {
        cln.Data[i] = input.QIntData[i];
    }
    var _input = cln.ToTHTensor();

    // Bit-pack each input row for the binary GEMM.
    var bin_input = THWrapper.THIntTensor_new();
    encode_rows_cpu(_input, bin_input);

    var _alpha = alpha.ToTHTensor();
    var _weight = weight.ToTHTensor();
    var _output = THWrapper.THFloatTensor_new();
    binop.fpbinary_gemm_cpu(bin_input, _weight, _output, m, n, k, 1, 0, 0, _alpha);

    THWrapper.THFloatTensor_free(_input);
    THWrapper.THIntTensor_free(bin_input);

    // alpha.t() in Q8.8 fixed point (scale 256).
    var ttt = alpha.Transpose2D();
    ttt.QIntData = new short[ttt.Data.Length];
    for (int i = 0; i < ttt.Data.Length; i++) {
        ttt.QIntData[i] = (short)(ttt.Data[i] * 256);
    }
    ttt.Data = null;

    if (bias != null) {
        // Python reference: output.data.add_(bias.data.expand(output.shape))
        throw new NotImplementedException();
    }

    // GEMM result in Q8.8 fixed point.
    var output = InternalArray.FromTHFloatTensor(_output);
    output.QIntData = new short[output.Data.Length];
    for (int i = 0; i < output.QIntData.Length; i++) {
        output.QIntData[i] = (short)(output.Data[i] * 256);
    }
    output.Data = null;

    // Q8.8 * Q8.8 >> 8 => Q8.8 product.
    // NOTE(review): only the first ttt.QIntData.Length elements are scaled; if m > 1
    // the remaining rows of output are left unscaled — confirm against callers.
    for (int i = 0; i < ttt.QIntData.Length; i++) {
        var val4 = (short)((int)(output.QIntData[i] * ttt.QIntData[i]) >> 8);
        output.QIntData[i] = val4;
    }

    THWrapper.THFloatTensor_free(_output);
    THWrapper.THFloatTensor_free(_weight); // fix: was never freed (native memory leak)
    THWrapper.THFloatTensor_free(_alpha);  // fix: was never freed (native memory leak)
    return output;
}
/// <summary>
/// Binary fully-connected layer. Mirrors the Python reference:
///   binop.encode_rows_cpu(input, bin_input)
///   binop.binary_gemm_cpu(bin_input, weight, output, m, n, k, 1, 0, 0, alpha)
///   output *= alpha.t().expand(output.shape)
/// </summary>
/// <param name="input">Float input, shape [m, n].</param>
/// <param name="weight">Binary weights, first dim k.</param>
/// <param name="bias">Currently ignored; the reference bias add is not ported.</param>
/// <param name="alpha">Per-output scaling factors.</param>
/// <returns>The scaled GEMM output as a new InternalArray.</returns>
internal static InternalArray bin_linear(InternalArray input, InternalArray weight, InternalArray bias, InternalArray alpha) {
    var m = input.Shape[0];
    var n = input.Shape[1];
    var k = weight.Shape[0];

    // Bit-pack each input row for the binary GEMM.
    var _input = input.ToTHTensor();
    var bin_input = THWrapper.THIntTensor_new();
    encode_rows_cpu(_input, bin_input);

    var _alpha = alpha.ToTHTensor();
    var _weight = weight.ToTHTensor();
    var _output = THWrapper.THFloatTensor_new();
    binop.binary_gemm_cpu(bin_input, _weight, _output, m, n, k, 1, 0, 0, _alpha);

    THWrapper.THFloatTensor_free(_input);
    THWrapper.THIntTensor_free(bin_input);

    if (bias != null) {
        // TODO: Python reference does output.data.add_(bias.data.expand(output.shape));
        // not yet ported — bias is silently ignored.
    }

    // Scale by alpha.t(); see fpbin_linear for the quantized variant.
    // NOTE(review): only the first ttt.Data.Length elements are scaled; if m > 1
    // the remaining rows of output are left unscaled — confirm against callers.
    var ttt = alpha.Transpose2D();
    var output = InternalArray.FromTHFloatTensor(_output);
    for (int i = 0; i < ttt.Data.Length; i++) {
        output.Data[i] *= ttt.Data[i];
    }

    THWrapper.THFloatTensor_free(_output);
    THWrapper.THFloatTensor_free(_weight); // fix: was never freed (native memory leak)
    THWrapper.THFloatTensor_free(_alpha);  // fix: was never freed (native memory leak)
    return output;
}