Code example #1
File: MainPage.xaml.cs  Project: ibebbs/UwpMl
        private async void Button_Click(object sender, RoutedEventArgs e)
        {
            // Use the file picker to get an image file
            var file = await GetImageFile();

            SoftwareBitmap softwareBitmap;

            byte[] bytes;


            // Load image & scale to tensor input dimensions
            using (IRandomAccessStream stream = await file.OpenAsync(FileAccessMode.Read))
            {
                bytes = await GetImageAsByteArrayAsync(stream, 320, 320, BitmapPixelFormat.Rgba8);

                softwareBitmap = await GetImageAsSoftwareBitmapAsync(stream, 320, 320, BitmapPixelFormat.Bgra8);
            }

            // Display source image
            var source = new SoftwareBitmapSource();
            await source.SetBitmapAsync(softwareBitmap);

            sourceImage.Source = source;

            // Convert interleaved rgba-rgba-...-rgba bytes to planar bb...b-rr...r-gg...g channels, scaled to a 0..1 tensor
            TensorFloat input = TensorFloat.CreateFromIterable(new long[] { 1, 3, 320, 320 }, TensorBrg(bytes));

            // Load model & perform inference
            StorageFile modelFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri($"ms-appx:///Assets/u2net.onnx"));

            u2netModel model = await u2netModel.CreateFromStreamAsync(modelFile);

            Stopwatch sw = new Stopwatch();

            sw.Start();
            u2netOutput output = await model.EvaluateAsync(new u2netInput { input = input });

            sw.Stop();

            await ToImage(output.o6, o6);
            await ToImage(output.o5, o5);
            await ToImage(output.o4, o4);
            await ToImage(output.o3, o3);
            await ToImage(output.o2, o2);
            await ToImage(output.o1, o1);

            await ToBlendedImage(bytes, output.o0, targetImage);
        }
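
The click handler above relies on several helpers that are not included in this excerpt (GetImageFile, GetImageAsByteArrayAsync, GetImageAsSoftwareBitmapAsync, TensorBrg, ToImage, ToBlendedImage). As an illustration only, here is a minimal sketch of what the TensorBrg conversion could look like, assuming the byte array holds interleaved RGBA pixels and the tensor expects planar channels in B, R, G order scaled to 0..1 (matching the comment and the { 1, 3, 320, 320 } shape above); this is a guess at the helper's behaviour, not the project's actual code.

        // Hypothetical sketch of TensorBrg: stream the blue, red and green planes one after the
        // other, scaling each byte to the 0..1 range. RGBA layout assumed: R = i, G = i+1, B = i+2.
        // Requires: using System.Collections.Generic;
        private IEnumerable<float> TensorBrg(byte[] bytes)
        {
            // Blue plane
            for (int i = 0; i < bytes.Length; i += 4)
            {
                yield return bytes[i + 2] / 255f;
            }

            // Red plane
            for (int i = 0; i < bytes.Length; i += 4)
            {
                yield return bytes[i] / 255f;
            }

            // Green plane
            for (int i = 0; i < bytes.Length; i += 4)
            {
                yield return bytes[i + 1] / 255f;
            }
        }
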
Code example #2
        private unsafe TensorFloat CustomTensorize(List<VideoFrame> frameList, List<float> mean, List<float> std, bool toRGB = false)
        {
            int               temp_len       = frameList.Count();
            SoftwareBitmap    softwareBitmap = frameList[0].SoftwareBitmap;
            Int32             height         = softwareBitmap.PixelHeight;
            Int32             width          = softwareBitmap.PixelWidth;
            BitmapPixelFormat pixelFormat    = softwareBitmap.BitmapPixelFormat;

            Int32 channels = BitmapPixelFormat.Gray8 == pixelFormat ? 1 : 3;

            List<Int64> shape = new List<Int64>()
            {
                1, temp_len, channels, height, width
            };                                                                              // B,T,C,H,W

            // Allocate the output tensor and declare pointers for direct access to its CPU buffer.
            TensorFloat tf = TensorFloat.Create(shape);
            byte *      pCPUTensorbyte;
            float *     pCPUTensor;
            uint        uCapacity;

            // Each frame buffer stores pixels interleaved (e.g. BGRA-BGRA-...-BGRA); the loop below
            // rearranges them into planar per-channel order and drops the alpha channel.
            var tfr  = tf.CreateReference();
            var tfr2 = (IMemoryBufferByteAccess)tfr;

            tfr2.GetBuffer(out pCPUTensorbyte, out uCapacity);
            pCPUTensor = (float *)pCPUTensorbyte;

            for (Int32 t = 0; t < temp_len; t += 1)
            {
                VideoFrame     frame           = frameList[t];
                SoftwareBitmap softwareBitmap2 = frame.SoftwareBitmap;
                // 1. Get the access to buffer of softwarebitmap
                BitmapBuffer           spBitmapBuffer = softwareBitmap2.LockBuffer(BitmapBufferAccessMode.Read);
                IMemoryBufferReference reference      = spBitmapBuffer.CreateReference();

                byte *pData;
                uint  size;
                ((IMemoryBufferByteAccess)reference).GetBuffer(out pData, out size);

                // 2. Convert the interleaved pixel bytes into normalized floats in planar channel order
                var offset = (height * width * channels) * t;
                if (BitmapPixelFormat.Bgra8 == pixelFormat)
                {
                    for (UInt32 i = 0; i < size; i += 4)
                    {
                        if (toRGB)
                        {
                            // toRGB: the model expects RGB. Source bytes are B (i+0), G (i+1), R (i+2), A (i+3, dropped),
                            // so they are written into the tensor in R, G, B channel order.
                            UInt32 pixelInd = i / 4;
                            pCPUTensor[offset + (height * width * 0) + pixelInd] = (((float)pData[i + 2]) - mean[0]) / std[0];
                            pCPUTensor[offset + (height * width * 1) + pixelInd] = (((float)pData[i + 1]) - mean[1]) / std[1];
                            pCPUTensor[offset + (height * width * 2) + pixelInd] = (((float)pData[i + 0]) - mean[2]) / std[2];
                        }
                        else
                        {
                            // The model expects BGR. Source bytes are already in B, G, R order, so the
                            // channel order is kept as-is (alpha dropped).
                            UInt32 pixelInd = i / 4;
                            pCPUTensor[offset + (height * width * 0) + pixelInd] = (((float)pData[i + 0]) - mean[0]) / std[0];
                            pCPUTensor[offset + (height * width * 1) + pixelInd] = (((float)pData[i + 1]) - mean[1]) / std[1];
                            pCPUTensor[offset + (height * width * 2) + pixelInd] = (((float)pData[i + 2]) - mean[2]) / std[2];
                        }
                    }
                }
                else if (BitmapPixelFormat.Rgba8 == pixelFormat)
                {
                    for (UInt32 i = 0; i < size; i += 4)
                    {
                        // Source bytes are R (i+0), G (i+1), B (i+2), A (i+3, dropped).
                        if (toRGB)
                        {
                            // toRGB: the model expects RGB, so the source R, G, B order is kept.
                            UInt32 pixelInd = i / 4;
                            pCPUTensor[offset + (height * width * 0) + pixelInd] = (((float)pData[i + 0]) - mean[0]) / std[0];
                            pCPUTensor[offset + (height * width * 1) + pixelInd] = (((float)pData[i + 1]) - mean[1]) / std[1];
                            pCPUTensor[offset + (height * width * 2) + pixelInd] = (((float)pData[i + 2]) - mean[2]) / std[2];
                        }
                        else
                        {
                            // The model expects BGR: swap the RGBA source into B, G, R channel order.
                            UInt32 pixelInd = i / 4;
                            pCPUTensor[offset + (height * width * 0) + pixelInd] = (((float)pData[i + 2]) - mean[0]) / std[0];
                            pCPUTensor[offset + (height * width * 1) + pixelInd] = (((float)pData[i + 1]) - mean[1]) / std[1];
                            pCPUTensor[offset + (height * width * 2) + pixelInd] = (((float)pData[i + 0]) - mean[2]) / std[2];
                        }
                    }
                }
                else if (BitmapPixelFormat.Gray8 == pixelFormat)
                {
                    for (UInt32 i = 0; i < size; i += 4)
                    {
                        // Single-channel output: the loop assumes the buffer still holds 4 bytes per pixel
                        // (BGRA order) and converts each pixel to luminance using Rec. 709 weights.
                        UInt32 pixelInd = i / 4;
                        float  red      = (float)pData[i + 2];
                        float  green    = (float)pData[i + 1];
                        float  blue     = (float)pData[i];
                        float  gray     = 0.2126f * red + 0.7152f * green + 0.0722f * blue;
                        pCPUTensor[offset + pixelInd] = gray;
                    }
                }
            }

            // To prevent the following error, copy the data into a new tensor and use that as the model input:
            // The tensor has outstanding memory buffer references that must be closed prior to evaluation!
            TensorFloat ret = TensorFloat.CreateFromIterable(
                tf.Shape,
                tf.GetAsVectorView());

            return ret;
        }
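
Both the tensor reference and the bitmap buffer reference are cast to IMemoryBufferByteAccess, a COM interop interface that is not declared in this excerpt. The standard declaration below is what the cast requires (it follows the usual Windows Runtime memory-buffer interop pattern and is added here for completeness, not copied from this project); the project must also be compiled with unsafe code enabled.

        // COM interop interface that exposes the raw byte pointer behind an IMemoryBufferReference.
        // Requires: using System.Runtime.InteropServices;
        [ComImport]
        [Guid("5B0D3235-4DBA-4D44-865E-8F1D0E4FD04D")]
        [InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
        unsafe interface IMemoryBufferByteAccess
        {
            // Returns a pointer to the start of the buffer and its capacity in bytes.
            void GetBuffer(out byte* buffer, out uint capacity);
        }

A call might look like the following, assuming ImageNet-style statistics expressed on the 0..255 byte scale (the values are illustrative, not taken from the project):

        // Hypothetical usage:
        // var mean = new List<float> { 0.485f * 255, 0.456f * 255, 0.406f * 255 };
        // var std  = new List<float> { 0.229f * 255, 0.224f * 255, 0.225f * 255 };
        // TensorFloat input = CustomTensorize(frames, mean, std, toRGB: true);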