/// <summary>
/// Convert a 16-bit depth image frame to a grayscale 24bpp color bitmap.
/// </summary>
/// <param name="bitmap">Destination bitmap; assumed 24bpp (3 bytes per pixel) and at least image.width x image.height.</param>
/// <param name="image">Source depth frame with 2 bytes per pixel.</param>
public static void SetDepthImage(Bitmap bitmap, ImageFrame image)
{
    unsafe
    {
        Debug.Assert(image.bytesPerPixel == 2);
        Rectangle rectangle = new Rectangle(0, 0, image.width, image.height);
        BitmapData bitmapData = bitmap.LockBits(rectangle, ImageLockMode.WriteOnly, bitmap.PixelFormat);
        // NOTE(review): pixelSize of 3 assumes a 24bpp destination, but LockBits uses
        // bitmap.PixelFormat — confirm callers always pass a Format24bppRgb bitmap.
        const int pixelSize = 3;
        for (int y = 0; y < image.height; y++)
        {
            // Each destination row starts at Scan0 + y * Stride (stride may include padding).
            byte* nRow = (byte*)bitmapData.Scan0 + (y * bitmapData.Stride);
            for (int x = 0; x < image.width; x++)
            {
                int byteNum = (y * image.width + x) * image.bytesPerPixel;
                // Combine the two depth bytes (first byte treated as the high byte —
                // assumes big-endian ordering in the buffer; TODO confirm) and scale
                // down to fit a byte. The divisor was previously 8191.
                byte value = (byte)(((image.Bytes[byteNum] << 8) + image.Bytes[byteNum + 1]) / 1000); //8191);
                // Write the same value to B, G and R to produce a grayscale pixel.
                nRow[x * pixelSize] = value;     //B
                nRow[x * pixelSize + 1] = value; //G
                nRow[x * pixelSize + 2] = value; //R
            }
        }
        // Unlock the bitmap so GDI+ can use it again.
        bitmap.UnlockBits(bitmapData);
        return;
    }
}
// Registers a nine-patch frame and derives its layout metadata from the guide pixels
// along the bitmap's edges: the top row and left column define the horizontal and
// vertical stretch/tile sections, while the bottom row and right column define the
// content padding.
// NOTE(review): the padding scans use Range (1, Width - 1) whereas the section scans
// use Range (1, Width - 2) — confirm the extra pixel is intentional.
internal void AddFrame(BitmapImage bitmap, double scaleFactor)
{
    ImageFrame frame = new ImageFrame {
        Bitmap = bitmap,
        ScaleFactor = scaleFactor
    };
    frames.Add (frame);

    // Section maps come from the top edge (horizontal) and left edge (vertical),
    // skipping the 1px guide-border corner pixels.
    frame.HorizontalSections = CreateSections (frame, Enumerable.Range (1, (int)bitmap.Width - 2).Select (n => bitmap.GetPixel (n, 0)));
    frame.VerticalSections = CreateSections (frame, Enumerable.Range (1, (int)bitmap.Height - 2).Select (n => bitmap.GetPixel (0, n)));

    double padLeft = 0, padTop = 0, padRight = 0, padBottom = 0;

    // Bottom edge: the first non-fixed section marks the horizontal content area,
    // from which left/right padding is derived.
    var hbox = CreateSections (frame, Enumerable.Range (1, (int)bitmap.Width - 1).Select (n => bitmap.GetPixel (n, (int)bitmap.Height - 1)));
    var sec = hbox.FirstOrDefault (s => s.Mode != RenderMode.Fixed);
    if (sec != null) {
        padLeft = sec.Start;
        padRight = bitmap.Width - 2 - padLeft - sec.Size;
    }

    // Right edge: same idea for top/bottom padding.
    var vbox = CreateSections (frame, Enumerable.Range (1, (int)bitmap.Height - 1).Select (n => bitmap.GetPixel ((int)bitmap.Width - 1, n)));
    sec = vbox.FirstOrDefault (s => s.Mode != RenderMode.Fixed);
    if (sec != null) {
        padTop = sec.Start;
        padBottom = bitmap.Height - 2 - padTop - sec.Size;
    }
    Padding = new WidgetSpacing (padLeft, padTop, padRight, padBottom);

    // Total stretchable extent, used later to distribute extra space among sections.
    frame.StretchableWidth = frame.HorizontalSections.Where (s => s.Mode != RenderMode.Fixed).Sum (s => s.Size);
    frame.StretchableHeight = frame.VerticalSections.Where (s => s.Mode != RenderMode.Fixed).Sum (s => s.Size);
}
/// <summary>
/// Convert a 1-byte-per-pixel binary image frame into the bits of a 1bpp bitmap.
/// Each source byte maps to one bit in the destination row, most significant bit first.
/// </summary>
/// <param name="bitmap">Destination bitmap; expected to be 1bpp and at least image.width x image.height.</param>
/// <param name="image">Source binary frame with 1 byte per pixel (0 clears the bit, non-zero sets it).</param>
public static void SetBinaryImage(Bitmap bitmap, ImageFrame image)
{
    unsafe
    {
        Debug.Assert(image.bytesPerPixel == 1);
        Rectangle rectangle = new Rectangle(0, 0, image.width, image.height);
        // ReadWrite (not WriteOnly): the &=/|= operations below read the existing
        // destination byte before modifying it, so the locked buffer must contain
        // the bitmap's current contents.
        BitmapData bitmapData = bitmap.LockBits(rectangle, ImageLockMode.ReadWrite, bitmap.PixelFormat);
        Parallel.For(0, image.height, y =>
        {
            // Rows are independent, so they can safely be processed in parallel.
            byte* nRow = (byte*)bitmapData.Scan0 + (y * bitmapData.Stride);
            for (int x = 0; x < image.width; x++)
            {
                int byteNum = (y * image.width + x) * image.bytesPerPixel;
                int pos = x / 8;               // Destination byte within the row
                int shiftAmount = 7 - (x % 8); // Bit within the byte (left to right)
                if (image.Bytes[byteNum] == 0)
                    nRow[pos] &= (byte)~(1 << shiftAmount); // Clear the bit
                else
                    nRow[pos] |= (byte)(1 << shiftAmount);  // Set the bit
            }
        });
        // Unlock the bitmap so GDI+ can use it again.
        bitmap.UnlockBits(bitmapData);
    }
}
/// <summary>
/// Forwards a Kinect depth frame to the gesture detector. Failures are logged but
/// deliberately not propagated, so a bad frame never crashes the capture loop.
/// </summary>
/// <param name="Depth">The depth image frame received from the Kinect runtime.</param>
public void UpdateKinectDepth(ImageFrame Depth)
{
    try
    {
        gGD.UpdateDepth(Depth);
    }
    catch (Exception ex)
    {
        // Best-effort: keep the capture loop alive, but don't swallow silently.
        System.Diagnostics.Debug.WriteLine("UpdateKinectDepth failed: " + ex);
    }
}
/// <summary>
/// Verifies that a depth frame is pushed through the filter and the filtered points
/// are then handed to the cluster factory.
/// </summary>
public void DataSource_Filters_Then_Calls_ClusterFactory()
{
    // Arrange
    var imageFrame = new ImageFrame();
    var filteredPoints = new List<Point>();
    this.filterMock.Setup(m => m.Filter(imageFrame)).Returns(filteredPoints);
    this.clusterFactoryMock.Setup(m => m.Create(filteredPoints)).Returns(new ClusterCollection());

    // Act
    this.runtimeStub.InvokeDepthFrameReady(imageFrame);

    // Assert — the original test set up the mocks but never asserted they were called.
    this.filterMock.Verify(m => m.Filter(imageFrame), Times.Once());
    this.clusterFactoryMock.Verify(m => m.Create(filteredPoints), Times.Once());
}
/// <summary>
/// Runs a frame's bitmap through the given processor, re-quantizes the result against
/// the frame's palette, and updates the image descriptor to the new dimensions.
/// </summary>
private static void TransformFrame(IBitmapProcessor bitmapProcessor, ImageFrame frame)
{
    // Work on a copy so the original bitmap can be released before processing.
    var workingCopy = (Bitmap)frame.Bitmap.Clone();
    frame.Bitmap.Dispose();

    var backgroundColor = frame.GetBackgroundColor().Color;
    frame.Bitmap = bitmapProcessor.Process(workingCopy, backgroundColor);
    DefaultQuantizer.Quantize(frame.Bitmap, frame.GetPalette());

    // Keep the descriptor in sync with the (possibly resized) bitmap.
    frame.ImageDescriptor.ImageWidth = (short)frame.Bitmap.Width;
    frame.ImageDescriptor.ImageHeight = (short)frame.Bitmap.Height;
}
/// <summary>
/// Copy a color image frame's raw bytes straight into a bitmap's pixel buffer.
/// NOTE(review): assumes the bitmap stride equals width * bytesPerPixel (no row
/// padding) — confirm for the bitmap sizes used by callers.
/// </summary>
/// <param name="bitmap">Destination bitmap.</param>
/// <param name="image">Source frame whose Bytes buffer is copied verbatim.</param>
public static void SetColorImage(Bitmap bitmap, ImageFrame image)
{
    var bounds = new Rectangle(0, 0, image.width, image.height);
    BitmapData locked = bitmap.LockBits(bounds, ImageLockMode.WriteOnly, bitmap.PixelFormat);
    int byteCount = image.width * image.height * image.bytesPerPixel;
    Marshal.Copy(image.Bytes, 0, locked.Scan0, byteCount);
    bitmap.UnlockBits(locked);
}
/// <summary>
/// Smoke test: a full-size frame filled with a constant byte value can be pushed
/// through a started data source without error.
/// </summary>
public void Can_Process_DepthImageData()
{
    dataSource.Start();

    var frame = new ImageFrame();
    int byteCount = this.runtimeStub.VideoStreamWidth * this.runtimeStub.VideoStreamHeight * 3;
    var bits = new byte[byteCount];
    for (int i = 0; i < bits.Length; i++)
    {
        bits[i] = 123;
    }
    frame.Image.Bits = bits;

    this.runtimeStub.InvokeDepthFrameReady(frame);
    this.dataSource.Stop();
}
/// <summary>
/// Copies the first three bytes of every source pixel into the 24bpp CurrentValue
/// bitmap, then signals listeners that new data is available.
/// NOTE(review): writes straight through pDest with no per-row stride adjustment —
/// assumes the destination stride is exactly Width * 3; confirm for the widths used.
/// </summary>
/// <param name="frame">The incoming image frame whose Bits buffer is read.</param>
protected unsafe void ProcessFrame(ImageFrame frame)
{
    var image = frame.Image;
    BitmapData bitmapData = this.CurrentValue.LockBits(
        new System.Drawing.Rectangle(0, 0, this.Width, this.Height),
        ImageLockMode.WriteOnly,
        System.Drawing.Imaging.PixelFormat.Format24bppRgb);
    byte* pDest = (byte*)bitmapData.Scan0.ToPointer();
    int pointer = 0;
    var maxIndex = this.Width * this.Height;
    for (int index = 0; index < maxIndex; index++)
    {
        // Copy one pixel (3 bytes), then advance the source cursor by its full pixel
        // size so any extra source bytes per pixel are skipped.
        pDest[0] = image.Bits[pointer];
        pDest[1] = image.Bits[pointer + 1];
        pDest[2] = image.Bits[pointer + 2];
        pDest += 3;
        pointer += image.BytesPerPixel;
    }
    this.CurrentValue.UnlockBits(bitmapData);
    this.OnNewDataAvailable();
}
/// <summary>
/// Darkens every bitmap pixel where the region mask is zero by multiplying its
/// B/G/R components by darkenFactor.
/// </summary>
/// <param name="bitmap">Bitmap darkened in place; assumed 24bpp (3 bytes per pixel).</param>
/// <param name="region">Mask frame with 1 byte per pixel; 0 marks pixels to darken.</param>
/// <param name="darkenFactor">Multiplier applied to each color component (values below 1 darken).</param>
static void DarkenRegion(Bitmap bitmap, ImageFrame region, float darkenFactor)
{
    unsafe
    {
        Debug.Assert(region.bytesPerPixel == 1);
        Rectangle rectangle = new Rectangle(0, 0, region.width, region.height);
        // ReadWrite (not WriteOnly): the loop reads the current pixel values before
        // scaling them, so the locked buffer must contain the existing bitmap data.
        BitmapData bitmapData = bitmap.LockBits(rectangle, ImageLockMode.ReadWrite, bitmap.PixelFormat);
        const int pixelSize = 3;
        for (int y = 0; y < region.height; y++)
        {
            byte* nRow = (byte*)bitmapData.Scan0 + (y * bitmapData.Stride);
            for (int x = 0; x < region.width; x++)
            {
                int byteNum = (y * region.width + x) * region.bytesPerPixel;
                if (region.Bytes[byteNum] == 0)
                {
                    // Scale each component in place.
                    nRow[x * pixelSize] = (byte)(nRow[x * pixelSize] * darkenFactor);         //B
                    nRow[x * pixelSize + 1] = (byte)(nRow[x * pixelSize + 1] * darkenFactor); //G
                    nRow[x * pixelSize + 2] = (byte)(nRow[x * pixelSize + 2] * darkenFactor); //R
                }
            }
        }
        // Unlock the bitmap so GDI+ can use it again.
        bitmap.UnlockBits(bitmapData);
    }
}
// Composites user-colored depth data onto the video frame and returns it as a bitmap:
// for every depth pixel belonging to a player, the matching color pixel is tinted with
// that player's color, then the modified video buffer is expanded into a WriteableBitmap.
private WriteableBitmap DrawPixels( Runtime kinect, ImageFrame video, ImageFrame depth )
{
    // Get the user ID for each pixel.
    for ( int y = 0; y < depth.Image.Height; y++ ) {
        for ( int x = 0; x < depth.Image.Width; x++ ) {
            int index = (x + (y * depth.Image.Width)) * 2;
            byte byte0 = depth.Image.Bits[index];
            byte byte1 = depth.Image.Bits[index + 1];

            // Extract the user ID (low 3 bits) and the distance (remaining 13 bits).
            // NOTE(review): distance is computed but never used below.
            int playerIndex = byte0 & 0x7;
            int distance = byte1 << 5 | byte0 >> 3;

            if ( playerIndex != 0 ) {
                // Convert the depth coordinates to camera (color) coordinates.
                int videoX = 0, videoY = 0;
                kinect.NuiCamera.GetColorPixelCoordinatesFromDepthPixel( ImageResolution.Resolution640x480,
                    new ImageViewArea(), x, y, 0, out videoX, out videoY );

                int videoIndex = (videoX + (videoY * video.Image.Width)) * video.Image.BytesPerPixel;
                // Clamp so the mapped coordinate can never index past the buffer.
                videoIndex = Math.Min( videoIndex, video.Image.Bits.Length - video.Image.BytesPerPixel );

                // Tint the color pixel with this player's color.
                video.Image.Bits[videoIndex] *= userColor[playerIndex].R;
                video.Image.Bits[videoIndex + 1] *= userColor[playerIndex].G;
                video.Image.Bits[videoIndex + 2] *= userColor[playerIndex].B;
            }
        }
    }

    // Expand the byte array into a drawable bitmap.
    // http://msdn.microsoft.com/ja-jp/magazine/cc534995.aspx
    WriteableBitmap bitmap = new WriteableBitmap( video.Image.Width, video.Image.Height,
        96, 96, PixelFormats.Bgr32, null );
    bitmap.WritePixels( new Int32Rect( 0, 0, video.Image.Width, video.Image.Height ),
        video.Image.Bits, video.Image.Width * video.Image.BytesPerPixel, 0 );

    return bitmap;
}
/// <summary>
/// Builds a synthetic depth frame (horizontally mirrored, 2 bytes per pixel,
/// low byte first) containing two depth-700 "blobs" at (2,2) and (15,8);
/// every other pixel has depth 0.
/// </summary>
private ImageFrame CreateDepthDataFrame()
{
    var frame = new ImageFrame();
    var data = new byte[this.size.Width * this.size.Height * 2];

    for (int y = 0; y < this.size.Height; y++)
    {
        for (int x = 0; x < this.size.Width; x++)
        {
            // The image is mirrored, so write each pixel right-to-left.
            int offset = (y * this.size.Width + this.size.Width - x - 1) * 2;
            bool isBlob = (x == 2 && y == 2) || (x == 15 && y == 8);
            int depthValue = isBlob ? 700 : 0;
            data[offset] = (byte)(depthValue % 256);
            data[offset + 1] = (byte)(depthValue / 256);
        }
    }

    frame.Image.Bits = data;
    return frame;
}
/// <summary>
/// Gets the configuration associated with the given image frame.
/// </summary>
/// <param name="source">The source image frame.</param>
/// <returns>Returns the frame's configuration.</returns>
public static Configuration GetConfiguration(this ImageFrame source)
{
    return GetConfiguration((IConfigurationProvider)source);
}
// Uploads the frame's raw pixel buffer into the output texture and applies it.
void DrawNow(ImageFrame imageFrame)
{
    var pixelData = imageFrame.MutablePixelData();
    var byteCount = imageFrame.PixelDataSize();
    outputTexture.LoadRawTextureData(pixelData, byteCount);
    outputTexture.Apply();
}
// Classifies a run of guide pixels into contiguous sections. Red-ish pixels mark
// tiled regions, black-ish pixels mark stretched regions, anything else is fixed.
// Colors are matched loosely so small variations in the guide pixels still work.
List<ImageSection> CreateSections(ImageFrame frame, IEnumerable<Color> pixels)
{
    var result = new List<ImageSection> ();
    ImageSection current = null;
    int position = 0;
    foreach (var pixel in pixels) {
        RenderMode mode;
        bool opaque = pixel.Alpha == 1;
        if (opaque && pixel.Red > 0.9 && pixel.Blue < 0.2 && pixel.Green < 0.2)
            mode = RenderMode.Tile;    // Red-ish
        else if (opaque && pixel.Red < 0.2 && pixel.Blue < 0.2 && pixel.Green < 0.2)
            mode = RenderMode.Stretch; // Black-ish
        else
            mode = RenderMode.Fixed;

        if (current != null && current.Mode == mode) {
            // Same mode as the previous pixel: extend the current section.
            current.Size++;
        } else {
            current = new ImageSection { Start = position, Size = 1, Mode = mode };
            result.Add (current);
        }
        position++;
    }
    return result;
}
/// <summary>
/// This method is called after the process is applied, allowing the processor to
/// finalize or clean up any per-frame state. The base implementation does nothing.
/// </summary>
/// <param name="source">The source image. Cannot be null.</param>
protected virtual void AfterFrameApply(ImageFrame<TPixel> source)
{
}
/// <inheritdoc />
/// <remarks>
/// Runs a directional convolution for each compass kernel and keeps, per pixel, the
/// component-wise maximum across all passes.
/// </remarks>
protected override void OnApply(ImageFrame<TPixel> source, Rectangle sourceRectangle, Configuration configuration)
{
    Fast2DArray<float>[] kernels =
    {
        this.North, this.NorthWest, this.West, this.SouthWest,
        this.South, this.SouthEast, this.East, this.NorthEast
    };

    int startY = sourceRectangle.Y;
    int endY = sourceRectangle.Bottom;
    int startX = sourceRectangle.X;
    int endX = sourceRectangle.Right;

    // Align start/end positions so indexing never leaves the frame.
    int minX = Math.Max(0, startX);
    int maxX = Math.Min(source.Width, endX);
    int minY = Math.Max(0, startY);
    int maxY = Math.Min(source.Height, endY);

    // we need a clean copy for each pass to start from
    using (ImageFrame<TPixel> cleanCopy = source.Clone())
    {
        new ConvolutionProcessor<TPixel>(kernels[0]).Apply(source, sourceRectangle, configuration);

        if (kernels.Length == 1)
        {
            return;
        }

        int shiftY = startY;
        int shiftX = startX;

        // Reset offset if necessary.
        if (minX > 0)
        {
            shiftX = 0;
        }

        if (minY > 0)
        {
            shiftY = 0;
        }

        // Additional runs: convolve the clean copy with each remaining kernel and
        // merge the result into the target by taking per-component maxima.
        // ReSharper disable once ForCanBeConvertedToForeach
        for (int i = 1; i < kernels.Length; i++)
        {
            using (ImageFrame<TPixel> pass = cleanCopy.Clone())
            {
                new ConvolutionProcessor<TPixel>(kernels[i]).Apply(pass, sourceRectangle, configuration);

                using (PixelAccessor<TPixel> passPixels = pass.Lock())
                using (PixelAccessor<TPixel> targetPixels = source.Lock())
                {
                    Parallel.For(
                        minY,
                        maxY,
                        configuration.ParallelOptions,
                        y =>
                        {
                            int offsetY = y - shiftY;
                            for (int x = minX; x < maxX; x++)
                            {
                                int offsetX = x - shiftX;

                                // Grab the max components of the two pixels
                                TPixel packed = default(TPixel);
                                packed.PackFromVector4(Vector4.Max(passPixels[offsetX, offsetY].ToVector4(), targetPixels[offsetX, offsetY].ToVector4()));
                                targetPixels[offsetX, offsetY] = packed;
                            }
                        });
                }
            }
        }
    }
}
/// <inheritdoc/>
/// <remarks>
/// Blends an elliptical vignette of the configured color into the frame; the blend
/// amount grows with distance from the rectangle's centre, capped to [0, 1].
/// </remarks>
protected override void OnFrameApply(
    ImageFrame<TPixel> source,
    Rectangle sourceRectangle,
    Configuration configuration)
{
    int startY = sourceRectangle.Y;
    int endY = sourceRectangle.Bottom;
    int startX = sourceRectangle.X;
    int endX = sourceRectangle.Right;
    TPixel vignetteColor = this.definition.VignetteColor.ToPixel<TPixel>();
    Vector2 centre = Rectangle.Center(sourceRectangle);

    Size sourceSize = source.Size();
    float finalRadiusX = this.definition.RadiusX.Calculate(sourceSize);
    float finalRadiusY = this.definition.RadiusY.Calculate(sourceSize);

    // Radii fall back to half the rectangle when unset (<= 0) and are capped at
    // half the rectangle otherwise.
    float rX = finalRadiusX > 0 ? MathF.Min(finalRadiusX, sourceRectangle.Width * .5F) : sourceRectangle.Width * .5F;
    float rY = finalRadiusY > 0 ? MathF.Min(finalRadiusY, sourceRectangle.Height * .5F) : sourceRectangle.Height * .5F;
    float maxDistance = MathF.Sqrt((rX * rX) + (rY * rY));

    // Align start/end positions.
    int minX = Math.Max(0, startX);
    int maxX = Math.Min(source.Width, endX);
    int minY = Math.Max(0, startY);
    int maxY = Math.Min(source.Height, endY);

    // Reset offset if necessary.
    if (minX > 0)
    {
        startX = 0;
    }

    if (minY > 0)
    {
        startY = 0;
    }

    int width = maxX - minX;
    int offsetX = minX - startX;
    var workingRect = Rectangle.FromLTRB(minX, minY, maxX, maxY);
    float blendPercentage = this.definition.GraphicsOptions.BlendPercentage;

    // One row of the vignette color is allocated once and reused for every blended row.
    using (IMemoryOwner<TPixel> rowColors = source.MemoryAllocator.Allocate<TPixel>(width))
    {
        rowColors.GetSpan().Fill(vignetteColor);

        ParallelHelper.IterateRowsWithTempBuffer<float>(
            workingRect,
            configuration,
            (rows, amounts) =>
            {
                Span<float> amountsSpan = amounts.Span;

                for (int y = rows.Min; y < rows.Max; y++)
                {
                    int offsetY = y - startY;

                    // Per-pixel blend amount scales with distance from the centre.
                    for (int i = 0; i < width; i++)
                    {
                        float distance = Vector2.Distance(centre, new Vector2(i + offsetX, offsetY));
                        amountsSpan[i] = (blendPercentage * (.9F * (distance / maxDistance))).Clamp(0, 1);
                    }

                    Span<TPixel> destination = source.GetPixelRowSpan(offsetY).Slice(offsetX, width);

                    // Blend the vignette row onto the destination row in place.
                    this.blender.Blend(
                        source.Configuration,
                        destination,
                        destination,
                        rowColors.GetSpan(),
                        amountsSpan);
                }
            });
    }
}
/// <summary>
/// This method is called before the process is applied to prepare the processor.
/// The base implementation does nothing.
/// </summary>
/// <param name="source">The source image. Cannot be null.</param>
protected virtual void BeforeFrameApply(ImageFrame<TPixel> source)
{
}
/// <summary>
/// Initializes the row-interval operation with the configuration, the target bounds,
/// and the source frame whose rows will be processed.
/// </summary>
public RowIntervalOperation(Configuration configuration, Rectangle bounds, ImageFrame<TPixel> source)
{
    this.source = source;
    this.bounds = bounds;
    this.configuration = configuration;
}
// Serializes a frame's polygon groups: group count, then for each group its name and
// polygons, and for each polygon its name, point count, and point coordinates.
// Points with negative coordinates are logged and written as (0,0) placeholders.
private static void WritePolygons(BinaryWriter writer, ImageFrame parent)
{
    writer.Write(parent.Children.Count());

    foreach (var group in parent.Children.Cast<PolygonGroup>())
    {
        writer.Write(group.Name);
        writer.Write(group.Children.Count);

        foreach (var polygon in group.Children.Cast<Polygon>())
        {
            writer.Write(polygon.Name);
            writer.Write(polygon.Children.Count);

            foreach (var point in polygon.Children.Cast<PolyPoint>())
            {
                var output = point;
                if (point.X < 0 || point.Y < 0)
                {
                    // Negative vertices are invalid; substitute the origin but keep the parent link.
                    Console.WriteLine(@"neg vertices for {0}!", parent.Name);
                    output = new PolyPoint(0, 0) { Parent = point.Parent };
                }

                writer.Write(output.X);
                writer.Write(output.Y);
                writer.Write(output.MappedX);
                writer.Write(output.MappedY);
            }
        }
    }
}
/// <summary>
/// Compares two image frames (possibly of different pixel types) and produces a
/// report describing how much they differ.
/// </summary>
/// <param name="expected">The reference frame.</param>
/// <param name="actual">The frame being checked against the reference.</param>
public abstract ImageSimilarityReport<TPixelA, TPixelB> CompareImagesOrFrames<TPixelA, TPixelB>(
    ImageFrame<TPixelA> expected,
    ImageFrame<TPixelB> actual)
    where TPixelA : struct, IPixel<TPixelA>
    where TPixelB : struct, IPixel<TPixelB>;
/// <inheritdoc/>
protected override void OnFrameApply(ImageFrame<TPixel> sourceBase)
{
    // All processing happens at the image level within BeforeImageApply();
    // the per-frame hook is intentionally a no-op.
}
/// <inheritdoc/> protected override void OnFrameApply(ImageFrame <TPixel> source, ImageFrame <TPixel> destination) { Matrix4x4 transformMatrix = this.definition.TransformMatrix; // Handle transforms that result in output identical to the original. if (transformMatrix.Equals(default) || transformMatrix.Equals(Matrix4x4.Identity))
/// <summary>
/// Quantizes the given bounds of the source frame into an indexed frame by
/// delegating to the shared frame-quantizer utilities. Unsafe.AsRef strips the
/// readonly so the utilities can take this quantizer by mutable reference.
/// </summary>
/// <param name="source">The frame to quantize.</param>
/// <param name="bounds">The region of the frame to quantize.</param>
public readonly IndexedImageFrame<TPixel> QuantizeFrame(ImageFrame<TPixel> source, Rectangle bounds)
    => FrameQuantizerUtilities.QuantizeFrame(ref Unsafe.AsRef(this), source, bounds);
/// <inheritdoc cref="IBrush{TPixel}" />
/// <remarks>
/// Implementations return an applicator bound to the given source frame and region;
/// callers dispose the applicator when the fill completes.
/// </remarks>
public abstract BrushApplicator<TPixel> CreateApplicator(
    ImageFrame<TPixel> source,
    RectangleF region,
    GraphicsOptions options);
// Converts a 16-bit grayscale depth frame which includes player indexes into a 32-bit frame
// that displays different players in different colors.
// Also rebuilds the `points` list with one skeleton-space coordinate per depth pixel.
// NOTE(review): relies on depthFrame32, points, _kinectRuntime and the *_IDX constants
// declared elsewhere in this class.
byte[] convertDepthFrame(ImageFrame Image)
{
    var width = Image.Image.Width;
    var height = Image.Image.Height;
    var greyIndex = 0;
    points = new List<Microsoft.Research.Kinect.Nui.Vector>();

    int i32 = 0;       // write cursor into the 32bpp output buffer
    int i16 = 0;       // read cursor into the 16bpp depth input
    int realDepth = 0;

    for (var y = 0; y < height; y++)
    {
        for (var x = 0; x < width; x++)
        {
            // The low 3 bits of the first byte hold the player index.
            int player = Image.Image.Bits[i16] & 0x07;
            depthFrame32[i32 + RED_IDX] = 0;
            depthFrame32[i32 + GREEN_IDX] = 0;
            depthFrame32[i32 + BLUE_IDX] = 0;

            switch (Image.Type)
            {
                case ImageType.DepthAndPlayerIndex:
                    // Depth occupies the upper 13 bits when a player index is present.
                    realDepth = (((Image.Image.Bits[greyIndex] >> 3) | (Image.Image.Bits[greyIndex + 1] << 5)) << 3);
                    points.Add(_kinectRuntime.SkeletonEngine.DepthImageToSkeleton(((float)x / Image.Image.Width), ((float)y / Image.Image.Height), (short)realDepth));
                    break;
                case ImageType.Depth: // depth comes back mirrored
                    realDepth = (((Image.Image.Bits[greyIndex] | Image.Image.Bits[greyIndex + 1] << 8)) << 3);
                    points.Add(_kinectRuntime.SkeletonEngine.DepthImageToSkeleton(((float)(width - x - 1) / Image.Image.Width), ((float)y / Image.Image.Height), (short)realDepth));
                    break;
            }

            // Nearer pixels are rendered brighter.
            byte intensity = (byte)(255 - (255 * realDepth / 0x0fff));

            // choose different display colors based on player
            switch (player)
            {
                case 0: // no player: half-intensity gray
                    depthFrame32[i32 + RED_IDX] = (byte)(intensity / 2);
                    depthFrame32[i32 + GREEN_IDX] = (byte)(intensity / 2);
                    depthFrame32[i32 + BLUE_IDX] = (byte)(intensity / 2);
                    break;
                case 1:
                    depthFrame32[i32 + RED_IDX] = intensity;
                    break;
                case 2:
                    depthFrame32[i32 + GREEN_IDX] = intensity;
                    break;
                case 3:
                    depthFrame32[i32 + RED_IDX] = (byte)(intensity / 4);
                    depthFrame32[i32 + GREEN_IDX] = (byte)(intensity);
                    depthFrame32[i32 + BLUE_IDX] = (byte)(intensity);
                    break;
                case 4:
                    depthFrame32[i32 + RED_IDX] = (byte)(intensity);
                    depthFrame32[i32 + GREEN_IDX] = (byte)(intensity);
                    depthFrame32[i32 + BLUE_IDX] = (byte)(intensity / 4);
                    break;
                case 5:
                    depthFrame32[i32 + RED_IDX] = (byte)(intensity);
                    depthFrame32[i32 + GREEN_IDX] = (byte)(intensity / 4);
                    depthFrame32[i32 + BLUE_IDX] = (byte)(intensity);
                    break;
                case 6:
                    depthFrame32[i32 + RED_IDX] = (byte)(intensity / 2);
                    depthFrame32[i32 + GREEN_IDX] = (byte)(intensity / 2);
                    depthFrame32[i32 + BLUE_IDX] = (byte)(intensity);
                    break;
                case 7: // inverted gray
                    depthFrame32[i32 + RED_IDX] = (byte)(255 - intensity);
                    depthFrame32[i32 + GREEN_IDX] = (byte)(255 - intensity);
                    depthFrame32[i32 + BLUE_IDX] = (byte)(255 - intensity);
                    break;
            }

            i32 += 4;       // 4 output bytes per pixel
            i16 += 2;       // 2 input bytes per pixel
            greyIndex += 2;
        }
    }

    return depthFrame32;
}
// Depth-stream callback: caches the most recent depth frame for later processing.
void runtime_DepthFrameReady(object sender, ImageFrameReadyEventArgs e)
{
    this.LastDepthFrame = e.ImageFrame;
}
// Appends a frame to the frames array, allocating it on first use or growing it by one.
void AddFrame(ImageFrame frame)
{
    if (frames == null)
    {
        frames = new ImageFrame[] { frame };
        return;
    }

    var grown = new ImageFrame[frames.Length + 1];
    Array.Copy(frames, grown, frames.Length);
    grown[frames.Length] = frame;
    frames = grown;
}
/// <summary>
/// Tolerant comparison: pixels are compared in Rgba64 space using Manhattan distance.
/// A pixel counts as different only above PerPixelManhattanThreshold, and a populated
/// report is produced only when the normalized total difference exceeds ImageThreshold.
/// </summary>
public override ImageSimilarityReport<TPixelA, TPixelB> CompareImagesOrFrames<TPixelA, TPixelB>(ImageFrame<TPixelA> expected, ImageFrame<TPixelB> actual)
{
    if (expected.Size() != actual.Size())
    {
        throw new InvalidOperationException("Calling ImageComparer is invalid when dimensions mismatch!");
    }

    int width = actual.Width;

    // TODO: Comparing through Rgba64 may not be robust enough because of the existence of super high precision pixel types.
    var aBuffer = new Rgba64[width];
    var bBuffer = new Rgba64[width];

    float totalDifference = 0F;

    var differences = new List<PixelDifference>();
    Configuration configuration = expected.Configuration;

    for (int y = 0; y < actual.Height; y++)
    {
        Span<TPixelA> aSpan = expected.GetPixelRowSpan(y);
        Span<TPixelB> bSpan = actual.GetPixelRowSpan(y);

        // Normalize both rows to Rgba64 so different pixel types can be compared.
        PixelOperations<TPixelA>.Instance.ToRgba64(configuration, aSpan, aBuffer);
        PixelOperations<TPixelB>.Instance.ToRgba64(configuration, bSpan, bBuffer);

        for (int x = 0; x < width; x++)
        {
            int d = GetManhattanDistanceInRgbaSpace(ref aBuffer[x], ref bBuffer[x]);

            // Only differences above the per-pixel threshold contribute to the total.
            if (d > this.PerPixelManhattanThreshold)
            {
                var diff = new PixelDifference(new Point(x, y), aBuffer[x], bBuffer[x]);
                differences.Add(diff);
                totalDifference += d;
            }
        }
    }

    // Normalize by pixel count, then by the maximum per-pixel Manhattan distance
    // (4 channels x 65535).
    float normalizedDifference = totalDifference / (actual.Width * (float)actual.Height);
    normalizedDifference /= 4F * 65535F;

    if (normalizedDifference > this.ImageThreshold)
    {
        return (new ImageSimilarityReport<TPixelA, TPixelB>(expected, actual, differences, normalizedDifference));
    }
    else
    {
        return (ImageSimilarityReport<TPixelA, TPixelB>.Empty);
    }
}
/// <summary>
/// Gets the representation of the pixels as a <see cref="IMemoryGroup{T}"/> containing the backing pixel data of the image
/// stored in row major order, as a list of contiguous <see cref="Memory{T}"/> blocks in the source image's pixel format.
/// </summary>
/// <param name="source">The source image.</param>
/// <typeparam name="TPixel">The type of the pixel.</typeparam>
/// <returns>The <see cref="IMemoryGroup{T}"/>.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="source"/> is null.</exception>
/// <remarks>
/// Certain Image Processors may invalidate the returned <see cref="IMemoryGroup{T}"/> and all it's buffers,
/// therefore it's not recommended to mutate the image while holding a reference to it's <see cref="IMemoryGroup{T}"/>.
/// </remarks>
public static IMemoryGroup<TPixel> GetPixelMemoryGroup<TPixel>(this ImageFrame<TPixel> source)
    where TPixel : unmanaged, IPixel<TPixel>
{
    // Explicit guard: the previous `source?.… ?? throw` form would also raise
    // ArgumentNullException if the View itself were null, misattributing the error.
    if (source is null)
    {
        throw new ArgumentNullException(nameof(source));
    }

    return source.PixelBuffer.FastMemoryGroup.View;
}
// Reads a single GIF image frame from the stream: image descriptor, optional local
// color table, color depth, LZW-compressed image data, and any trailing application
// data. The global color table is used when the descriptor does not declare a local one.
private static ImageFrame ReadImageFrame(Stream stream, byte[] globalColorTable, GraphicControlExtension graphicControlExtension)
{
    var imageDescriptor = ImageDescriptor.Read(stream);

    var imageFrame = new ImageFrame
    {
        ImageDescriptor = imageDescriptor,
        LocalColorTable = globalColorTable,
        GraphicControlExtension = graphicControlExtension
    };

    // A local color table, when present, overrides the global one (3 bytes per entry).
    if (imageDescriptor.LocalColorTableFlag)
    {
        imageFrame.LocalColorTable = stream.ReadBytes(imageDescriptor.LocalColorTableSize * 3);
    }

    imageFrame.ColorDepth = stream.ReadByte();

    var lzwDecoder = new LzwDecoder(stream);
    var imageData = lzwDecoder.DecodeImageData(imageDescriptor.ImageWidth, imageDescriptor.ImageHeight, imageFrame.ColorDepth);

    ApplicationData.Read(stream);

    imageFrame.Bitmap = CreateBitmap(
        imageData,
        imageFrame.GetPalette(),
        imageDescriptor.InterlaceFlag,
        imageDescriptor.ImageWidth,
        imageDescriptor.ImageHeight);

    return imageFrame;
}
/// <summary>
/// Applies the processor to a single image frame, surrounding the per-frame work
/// with the Before/After hooks.
/// </summary>
/// <param name="source">The source image frame.</param>
public void Apply(ImageFrame<TPixel> source)
{
    this.BeforeFrameApply(source);
    this.OnFrameApply(source);
    this.AfterFrameApply(source);
}
// Caches the latest depth frame received from the sensor for later processing.
public void updateDepthFrame(ImageFrame frame)
{
    depthFrame = frame;
}
/// <summary>
/// Applies the process to the specified <see cref="ImageFrame{TPixel}"/>.
/// </summary>
/// <param name="source">The source image. Cannot be null.</param>
protected abstract void OnFrameApply(ImageFrame<TPixel> source);
// Draws the image at (x, y): stock icons get a per-size/per-scale frame that is
// rendered and cached on demand; custom-drawn images invoke the user draw callback;
// otherwise the best cached frame for the requested size is used.
public void Draw (ApplicationContext actx, Cairo.Context ctx, double scaleFactor, double x, double y, ImageDescription idesc)
{
    if (stockId != null) {
        // Look for a cached frame matching the exact size and scale.
        ImageFrame frame = null;
        if (frames != null)
            frame = frames.FirstOrDefault (f => f.Width == (int) idesc.Size.Width && f.Height == (int) idesc.Size.Height && f.Scale == scaleFactor);
        if (frame == null) {
            // Render and cache a new frame for this size/scale combination.
            frame = new ImageFrame (ImageHandler.CreateBitmap (stockId, idesc.Size.Width, idesc.Size.Height, scaleFactor), (int)idesc.Size.Width, (int)idesc.Size.Height, false);
            frame.Scale = scaleFactor;
            AddFrame (frame);
        }
        DrawPixbuf (ctx, frame.Pixbuf, x, y, idesc);
    } else if (drawCallback != null) {
        // Custom drawing: hand a Cairo-backed context to the user callback.
        CairoContextBackend c = new CairoContextBackend (scaleFactor) {
            Context = ctx
        };
        if (actx != null) {
            actx.InvokeUserCode (delegate {
                drawCallback (c, new Rectangle (x, y, idesc.Size.Width, idesc.Size.Height));
            });
        } else
            drawCallback (c, new Rectangle (x, y, idesc.Size.Width, idesc.Size.Height));
    } else {
        DrawPixbuf (ctx, GetBestFrame (actx, scaleFactor, idesc.Size.Width, idesc.Size.Height, false), x, y, idesc);
    }
}
/// <inheritdoc/>
/// <remarks>
/// Fills the working rectangle with the configured brush. A solid brush without
/// blending takes a fast path that fills each row span directly; everything else
/// goes through a brush applicator driven by a constant full-opacity amount buffer.
/// </remarks>
protected override void OnFrameApply(ImageFrame<TPixel> source, Rectangle sourceRectangle, Configuration configuration)
{
    int startX = sourceRectangle.X;
    int endX = sourceRectangle.Right;
    int startY = sourceRectangle.Y;
    int endY = sourceRectangle.Bottom;

    // Align start/end positions.
    int minX = Math.Max(0, startX);
    int maxX = Math.Min(source.Width, endX);
    int minY = Math.Max(0, startY);
    int maxY = Math.Min(source.Height, endY);

    int width = maxX - minX;

    var workingRect = Rectangle.FromLTRB(minX, minY, maxX, maxY);

    IBrush brush = this.definition.Brush;
    GraphicsOptions options = this.definition.Options;

    // If there's no reason for blending, then avoid it.
    if (this.IsSolidBrushWithoutBlending(out SolidBrush solidBrush))
    {
        ParallelExecutionSettings parallelSettings = configuration.GetParallelSettings().MultiplyMinimumPixelsPerTask(4);

        TPixel colorPixel = solidBrush.Color.ToPixel<TPixel>();

        ParallelHelper.IterateRows(
            workingRect,
            parallelSettings,
            rows =>
            {
                for (int y = rows.Min; y < rows.Max; y++)
                {
                    source.GetPixelRowSpan(y).Slice(minX, width).Fill(colorPixel);
                }
            });
    }
    else
    {
        // Reset offset if necessary.
        if (minX > 0)
        {
            startX = 0;
        }

        if (minY > 0)
        {
            startY = 0;
        }

        using (IMemoryOwner<float> amount = source.MemoryAllocator.Allocate<float>(width))
        using (BrushApplicator<TPixel> applicator = brush.CreateApplicator(
            source,
            sourceRectangle,
            options))
        {
            // Full opacity for every pixel in a row.
            amount.GetSpan().Fill(1f);

            ParallelHelper.IterateRows(
                workingRect,
                configuration,
                rows =>
                {
                    for (int y = rows.Min; y < rows.Max; y++)
                    {
                        int offsetY = y - startY;
                        int offsetX = minX - startX;

                        applicator.Apply(amount.GetSpan(), offsetX, offsetY);
                    }
                });
        }
    }
}
// Caches the most recent non-null output frame; null frames are ignored.
void OnOutput(ImageFrame outputVideo)
{
    if (outputVideo == null)
    {
        return;
    }
    currentOutput = outputVideo;
}
/// <inheritdoc />
/// <remarks>
/// Delegates the work to a two-kernel convolution over the same rectangle.
/// </remarks>
protected override void OnApply(ImageFrame<TPixel> source, Rectangle sourceRectangle, Configuration configuration)
{
    new Convolution2DProcessor<TPixel>(this.KernelX, this.KernelY).Apply(source, sourceRectangle, configuration);
}
/// <inheritdoc />
public BrushApplicator<TPixel> CreateApplicator(ImageFrame<TPixel> source, RectangleF region, GraphicsOptions options)
{
    // Bind a recolor applicator to the frame using this brush's source/target colors
    // and match threshold.
    var applicator = new RecolorBrushApplicator(source, this.SourceColor, this.TargeTPixel, this.Threshold, options);
    return applicator;
}
/// <summary>
/// Generates a usable byte array from the raw data supplied by the Kinect.
/// The Kinect's native data format is complex and depends on many parameters;
/// this function encapsulates and abstracts the conversion.
/// </summary>
/// <param name="imageFrame">The raw depth frame from the Kinect.</param>
/// <returns>An RGBA buffer where only player pixels are visible.</returns>
/// <see href="http://channel9.msdn.com/Series/KinectSDKQuickstarts/Working-with-Depth-Data"/>
private byte[] GenerateColoredBytes(ImageFrame imageFrame)
{
    int height = imageFrame.Image.Height;
    int width = imageFrame.Image.Width;

    //Depth data for each pixel
    Byte[] depthData = imageFrame.Image.Bits;

    //colorFrame contains color information for all pixels in image
    //Height x Width x 4 (Red, Green, Blue, empty byte)
    Byte[] colorFrame = new byte[imageFrame.Image.Height * imageFrame.Image.Width * 4];

    //Bgr32 - Blue, Green, Red, empty byte
    //Bgra32 - Blue, Green, Red, transparency
    //You must set transparency for Bgra as .NET defaults a byte to 0 = fully transparent

    //hardcoded locations to Blue, Green, Red (BGR) index positions
    const int BlueIndex = 0;
    const int GreenIndex = 1;
    const int RedIndex = 2;
    const int AlphaIndex = 3;

    var depthIndex = 0;
    for (var y = 0; y < height; y++)
    {
        var heightOffset = y * width;

        for (var x = 0; x < width; x++)
        {
            var index = ((x + 0) + heightOffset) * 4;

            // By default the pixel is white and fully transparent. Since the format
            // has four components per pixel, all four must be set.
            colorFrame[index + BlueIndex] = 255;
            colorFrame[index + GreenIndex] = 255;
            colorFrame[index + RedIndex] = 255;
            colorFrame[index + AlphaIndex] = 0;

            // If the pixel contains a player, make it visible using playerAlpha.
            if (GetPlayerIndex(depthData[depthIndex]) > 0)
            {
                colorFrame[index + BlueIndex] = 255;
                colorFrame[index + GreenIndex] = 255;
                colorFrame[index + RedIndex] = 255;
                colorFrame[index + AlphaIndex] = playerAlpha;
            }

            //jump two bytes at a time
            depthIndex += 2;
        }
    }

    return colorFrame;
}
/// <inheritdoc/>
/// <remarks>
/// Two-kernel convolution: for every pixel, applies KernelX and KernelY over a
/// clamped neighborhood and combines the two responses as sqrt(x^2 + y^2) per color
/// channel, preserving the original alpha.
/// </remarks>
protected override void OnApply(ImageFrame<TPixel> source, Rectangle sourceRectangle, Configuration configuration)
{
    int kernelYHeight = this.KernelY.Height;
    int kernelYWidth = this.KernelY.Width;
    int kernelXHeight = this.KernelX.Height;
    int kernelXWidth = this.KernelX.Width;
    int radiusY = kernelYHeight >> 1;
    int radiusX = kernelXWidth >> 1;

    int startY = sourceRectangle.Y;
    int endY = sourceRectangle.Bottom;
    int startX = sourceRectangle.X;
    int endX = sourceRectangle.Right;
    int maxY = endY - 1;
    int maxX = endX - 1;

    // Write into a separate accessor so reads always see the unmodified source.
    using (var targetPixels = new PixelAccessor<TPixel>(source.Width, source.Height))
    {
        source.CopyTo(targetPixels);

        Parallel.For(
            startY,
            endY,
            configuration.ParallelOptions,
            y =>
            {
                Span<TPixel> sourceRow = source.GetPixelRowSpan(y);
                Span<TPixel> targetRow = targetPixels.GetRowSpan(y);

                for (int x = startX; x < endX; x++)
                {
                    float rX = 0;
                    float gX = 0;
                    float bX = 0;
                    float rY = 0;
                    float gY = 0;
                    float bY = 0;

                    // Apply each matrix multiplier to the color components for each pixel.
                    for (int fy = 0; fy < kernelYHeight; fy++)
                    {
                        int fyr = fy - radiusY;
                        int offsetY = y + fyr;

                        // Clamp sampling offsets so edge pixels reuse the border row.
                        offsetY = offsetY.Clamp(0, maxY);

                        Span<TPixel> sourceOffsetRow = source.GetPixelRowSpan(offsetY);

                        for (int fx = 0; fx < kernelXWidth; fx++)
                        {
                            int fxr = fx - radiusX;
                            int offsetX = x + fxr;
                            offsetX = offsetX.Clamp(0, maxX);

                            Vector4 currentColor = sourceOffsetRow[offsetX].ToVector4().Premultiply();

                            // Each kernel only contributes within its own dimensions.
                            if (fy < kernelXHeight)
                            {
                                Vector4 kx = this.KernelX[fy, fx] * currentColor;
                                rX += kx.X;
                                gX += kx.Y;
                                bX += kx.Z;
                            }

                            if (fx < kernelYWidth)
                            {
                                Vector4 ky = this.KernelY[fy, fx] * currentColor;
                                rY += ky.X;
                                gY += ky.Y;
                                bY += ky.Z;
                            }
                        }
                    }

                    // Gradient magnitude per channel.
                    float red = MathF.Sqrt((rX * rX) + (rY * rY));
                    float green = MathF.Sqrt((gX * gX) + (gY * gY));
                    float blue = MathF.Sqrt((bX * bX) + (bY * bY));

                    ref TPixel pixel = ref targetRow[x];
                    pixel.PackFromVector4(new Vector4(red, green, blue, sourceRow[x].ToVector4().W).UnPremultiply());
                }
            });

        source.SwapPixelsBuffers(targetPixels);
    }
}
// Collects the (x, y) coordinates of every non-zero pixel in a 1-byte-per-pixel frame.
static Point[] GetIndices(ImageFrame image)
{
    Debug.Assert(image.bytesPerPixel == 1);
    var hits = new List<Point>();
    for (int row = 0; row < image.height; row++)
    {
        int rowStart = row * image.width;
        for (int col = 0; col < image.width; col++)
        {
            if (image.Bytes[(rowStart + col) * image.bytesPerPixel] != 0)
            {
                hits.Add(new Point(col, row));
            }
        }
    }
    return hits.ToArray();
}
/// <inheritdoc/>
protected override void OnFrameApply(ImageFrame<TPixel> source)
{
    // Rasterizes the configured region into the frame using the configured brush,
    // accumulating subpixel coverage per scanline for antialiasing.
    Configuration configuration = this.Configuration;
    GraphicsOptions options = this.definition.Options;
    IBrush brush = this.definition.Brush;
    Region region = this.definition.Region;
    Rectangle rect = region.Bounds;

    // Align start/end positions and clip to the frame bounds.
    int minX = Math.Max(0, rect.Left);
    int maxX = Math.Min(source.Width, rect.Right);
    int minY = Math.Max(0, rect.Top);
    int maxY = Math.Min(source.Height, rect.Bottom);
    if (minX >= maxX)
    {
        return; // no effect inside image;
    }

    if (minY >= maxY)
    {
        return; // no effect inside image;
    }

    int maxIntersections = region.MaxIntersections;
    float subpixelCount = 4;

    // we need to offset the pixel grid to account for when we outline a path.
    // basically if the line is [1,2] => [3,2] then when outlining at 1 we end up with a
    // region of [0.5,1.5],[1.5,1.5],[3.5,2.5],[2.5,2.5] and this can cause missed fills
    // when not using antialiasing. So we offset the pixel grid by 0.5 in the x & y
    // direction, causing the region to align with the pixel grid.
    float offset = 0.5f;
    if (options.Antialias)
    {
        offset = 0f; // we are antialiasing, skip offsetting as real antialiasing should take care of offset.
        subpixelCount = options.AntialiasSubpixelDepth;
        if (subpixelCount < 4)
        {
            subpixelCount = 4;
        }
    }

    using (BrushApplicator<TPixel> applicator = brush.CreateApplicator(configuration, options, source, rect))
    {
        int scanlineWidth = maxX - minX;
        using (IMemoryOwner<float> bBuffer = source.MemoryAllocator.Allocate<float>(maxIntersections))
        using (IMemoryOwner<float> bScanline = source.MemoryAllocator.Allocate<float>(scanlineWidth))
        {
            bool scanlineDirty = true;

            // Each scanline is sampled subpixelCount times vertically; a full-coverage
            // subpixel crossing contributes subpixelFraction, a fractional edge pixel
            // contributes subpixelFractionPoint per sample.
            float subpixelFraction = 1f / subpixelCount;
            float subpixelFractionPoint = subpixelFraction / subpixelCount;
            Span<float> buffer = bBuffer.Memory.Span;
            Span<float> scanline = bScanline.Memory.Span;

            bool isSolidBrushWithoutBlending = this.IsSolidBrushWithoutBlending(out SolidBrush solidBrush);
            TPixel solidBrushColor = isSolidBrushWithoutBlending ? solidBrush.Color.ToPixel<TPixel>() : default;

            for (int y = minY; y < maxY; y++)
            {
                // Only clear the coverage buffer if the previous row wrote into it.
                if (scanlineDirty)
                {
                    scanline.Clear();
                    scanlineDirty = false;
                }

                float yPlusOne = y + 1;
                for (float subPixel = y; subPixel < yPlusOne; subPixel += subpixelFraction)
                {
                    // Find where the region boundary crosses this horizontal sample line.
                    int pointsFound = region.Scan(subPixel + offset, buffer, configuration);
                    if (pointsFound == 0)
                    {
                        // nothing on this line, skip
                        continue;
                    }

                    QuickSort.Sort(buffer.Slice(0, pointsFound));

                    for (int point = 0; point < pointsFound && point < buffer.Length - 1; point += 2)
                    {
                        // points will be paired up: [enter, exit] spans along the scanline
                        float scanStart = buffer[point] - minX;
                        float scanEnd = buffer[point + 1] - minX;
                        int startX = (int)MathF.Floor(scanStart + offset);
                        int endX = (int)MathF.Floor(scanEnd + offset);

                        // Partial coverage at the left edge of the span.
                        if (startX >= 0 && startX < scanline.Length)
                        {
                            for (float x = scanStart; x < startX + 1; x += subpixelFraction)
                            {
                                scanline[startX] += subpixelFractionPoint;
                                scanlineDirty = true;
                            }
                        }

                        // Partial coverage at the right edge of the span.
                        if (endX >= 0 && endX < scanline.Length)
                        {
                            for (float x = endX; x < scanEnd; x += subpixelFraction)
                            {
                                scanline[endX] += subpixelFractionPoint;
                                scanlineDirty = true;
                            }
                        }

                        // Full coverage for the interior of the span, clamped to the buffer.
                        int nextX = startX + 1;
                        endX = Math.Min(endX, scanline.Length); // reduce to end to the right edge
                        nextX = Math.Max(nextX, 0);
                        for (int x = nextX; x < endX; x++)
                        {
                            scanline[x] += subpixelFraction;
                            scanlineDirty = true;
                        }
                    }
                }

                if (scanlineDirty)
                {
                    if (!options.Antialias)
                    {
                        // Threshold coverage to hard 0/1 when antialiasing is off.
                        bool hasOnes = false;
                        bool hasZeros = false;
                        for (int x = 0; x < scanlineWidth; x++)
                        {
                            if (scanline[x] >= 0.5)
                            {
                                scanline[x] = 1;
                                hasOnes = true;
                            }
                            else
                            {
                                scanline[x] = 0;
                                hasZeros = true;
                            }
                        }

                        // Fast path: a uniform all-ones row painted with a solid,
                        // non-blending brush can be filled directly; a uniform
                        // all-zeros row can be skipped entirely.
                        if (isSolidBrushWithoutBlending && hasOnes != hasZeros)
                        {
                            if (hasOnes)
                            {
                                source.GetPixelRowSpan(y).Slice(minX, scanlineWidth).Fill(solidBrushColor);
                            }

                            continue;
                        }
                    }

                    applicator.Apply(scanline, minX, y);
                }
            }
        }
    }
}
/// <summary>
/// Renders the image at the given logical size and scale factor into a new
/// Cairo surface, converts it to a Pixbuf and caches it as a frame.
/// </summary>
/// <param name="actx">The application context used for drawing.</param>
/// <param name="scaleFactor">The output scale (e.g. 2 for HiDPI).</param>
/// <param name="width">Logical width in points.</param>
/// <param name="height">Logical height in points.</param>
/// <returns>The rendered pixbuf for the new frame.</returns>
Gdk.Pixbuf RenderFrame (ApplicationContext actx, double scaleFactor, double width, double height)
{
    // Clamp the surface to at least 1x1: a zero-sized Cairo image surface is
    // invalid and would make CreatePixbuf fail for degenerate sizes.
    var swidth = Math.Max ((int)(width * scaleFactor), 1);
    var sheight = Math.Max ((int)(height * scaleFactor), 1);
    using (var sf = new Cairo.ImageSurface (Cairo.Format.ARGB32, swidth, sheight))
    using (var ctx = new Cairo.Context (sf)) {
        // The scale factor is baked into the description size, so Draw is
        // invoked with a scale of 1.
        ImageDescription idesc = new ImageDescription () {
            Alpha = 1,
            Size = new Size (width * scaleFactor, height * scaleFactor)
        };
        Draw (actx, ctx, 1, 0, 0, idesc);
        var f = new ImageFrame (ImageBuilderBackend.CreatePixbuf (sf), (int)width, (int)height);
        AddFrame (f);
        return f.Pixbuf;
    }
}
/// <summary>
/// Associates all non-empty <see cref="IndexFrame"/> objects with the corresponding <see
/// cref="UniqueTile"/> objects.</summary>
/// <remarks>
/// Walks every image file in the master section (except hexagon-specific river/road
/// files), slices each bitmap into a fixed grid of tile frames, discards empty tiles,
/// and groups identical tiles under a single <see cref="UniqueTile"/>.</remarks>
private void CollectTiles()
{
    this._tiles.Clear();

    // fixed values for DeBray Bailey's tileset
    SizeI frameSize = new SizeI(24, 35);
    PointI offset = new PointI(2, 1);
    PointI spacing = new PointI(3, 3);
    const int columns = 20;

    Trace.WriteLine("\nCollecting Tiles\n----------------");
    foreach (ImageFile file in MasterSection.Instance.Images.ImageFiles.Values)
    {
        // skip river & road files that were designed for hexagons
        if (file.Id == "file-rivers" || file.Id == "file-roads")
        {
            continue;
        }

        // compute number of tile rows
        WriteableBitmap bitmap = file.Bitmap;
        int rows = (bitmap.PixelHeight - offset.Y) / (frameSize.Height + spacing.Y) + 1;

        Trace.WriteLine(String.Format(CultureInfo.InvariantCulture,
            "\nFile {0} contains {1} tile rows.", file.Id, rows));

        for (int y = 0; y < rows; y++)
        {
            // sanity check for excessive row count
            int frameTop = offset.Y + y * (frameSize.Height + spacing.Y);
            if (frameTop + frameSize.Height > bitmap.Height)
            {
                continue;
            }

            for (int x = 0; x < columns; x++)
            {
                // sanity check for excessive column count
                int frameLeft = offset.X + x * (frameSize.Width + spacing.X);
                if (frameLeft + frameSize.Width > bitmap.Width)
                {
                    continue;
                }

                // create ImageFrame with current source & bounds
                ImageFrame frame = new ImageFrame();
                frame.Bounds = new RectI(
                    frameLeft, frameTop, frameSize.Width, frameSize.Height);
                frame.Source = new ImageFilePair(file.Id, file);

                // create IndexFrame with current frame & index
                int index = y * columns + x;
                IndexFrame indexFrame = new IndexFrame(frame, index);

                // check for empty bitmap tile
                if (IsTileEmpty(frame))
                {
                    Trace.WriteLine(String.Format(CultureInfo.InvariantCulture,
                        "Empty tile: {0} #{1}", file.Id, index));
                    continue;
                }

                // check for duplicate bitmap tiles; duplicates are appended to the
                // existing UniqueTile and the goto skips the "new tile" step below
                foreach (UniqueTile oldTile in this._tiles)
                {
                    IndexFrame oldFrame = oldTile.FileFrames[0];
                    if (AreTilesEqual(frame, oldFrame.Frame))
                    {
                        Trace.WriteLine(String.Format(CultureInfo.InvariantCulture,
                            "Duplicates: {0} #{1} = {2} #{3}", file.Id, index,
                            oldFrame.Frame.Source.Key, oldFrame.Index));

                        oldTile.FileFrames.Add(indexFrame);
                        goto nextTile;
                    }
                }

                // add a new unique bitmap tile
                UniqueTile tile = new UniqueTile();
                tile.FileFrames.Add(indexFrame);
                this._tiles.Add(tile);

            nextTile:
                continue;
            }
        }
    }
}
/// <summary>
/// Stores the given frame as the current color frame.
/// </summary>
/// <param name="frame">The new color frame to keep.</param>
public void updateColorFrame(ImageFrame frame) => colorFrame = frame;
/// <summary>
/// Initializes a new instance of the <see cref="ImageBrush{TPixel}"/> class.
/// </summary>
/// <param name="image">The image this brush will paint with.</param>
public ImageBrush(ImageFrame<TPixel> image) => this.image = image;
/// <summary>
/// Renders the image at the given logical size into a scaled Cairo surface,
/// converts it to a Pixbuf and records it as a new frame.
/// </summary>
/// <param name="actx">The application context used for drawing.</param>
/// <param name="scaleFactor">The output scale (e.g. 2 for HiDPI).</param>
/// <param name="width">Logical width in points.</param>
/// <param name="height">Logical height in points.</param>
/// <returns>The rendered pixbuf for the new frame.</returns>
Gdk.Pixbuf RenderFrame (ApplicationContext actx, double scaleFactor, double width, double height)
{
    // Physical surface size, clamped to at least 1x1 for Cairo.
    int pixelWidth = Math.Max ((int)(width * scaleFactor), 1);
    int pixelHeight = Math.Max ((int)(height * scaleFactor), 1);

    using (var surface = new Cairo.ImageSurface (Cairo.Format.ARGB32, pixelWidth, pixelHeight))
    using (var cr = new Cairo.Context (surface)) {
        var desc = new ImageDescription {
            Alpha = 1,
            Size = new Size (width, height)
        };
        // Scale the context so drawing uses logical coordinates.
        cr.Scale (scaleFactor, scaleFactor);
        Draw (actx, cr, scaleFactor, 0, 0, desc);

        var frame = new ImageFrame (ImageBuilderBackend.CreatePixbuf (surface), Math.Max ((int)width, 1), Math.Max ((int)height, 1), true);
        AddFrame (frame);
        return frame.Pixbuf;
    }
}
/// <inheritdoc />
public BrushApplicator<TPixel> CreateApplicator(ImageFrame<TPixel> source, RectangleF region, GraphicsOptions options)
    => new ImageBrushApplicator(source, this.image, region, options);
/// <summary>
/// Computes the rendered size of an image section.
/// </summary>
/// <param name="frame">The frame the section belongs to (used for scale correction).</param>
/// <param name="sec">The section to size.</param>
/// <param name="totalVariable">Total space available to stretchable sections.</param>
/// <param name="stretchableSize">Combined natural size of all stretchable sections.</param>
/// <param name="remainingVariable">Space still unassigned; reduced by this section's share.</param>
/// <returns>The size to render this section at.</returns>
double CalcSectionSize (ImageFrame frame, ImageSection sec, double totalVariable, double stretchableSize, ref double remainingVariable)
{
    // Fixed sections keep their natural, scale-corrected size.
    if (sec.Mode == RenderMode.Fixed)
        return sec.Size / frame.ScaleFactor;

    // Stretchable sections take a proportional share of the variable space,
    // clamped so the running total never exceeds what is left.
    double share = Math.Round (totalVariable * (sec.Size / stretchableSize));
    share = Math.Min (share, remainingVariable);
    remainingVariable -= share;
    return share;
}
/// <summary>
/// Initializes a new instance of the <see cref="ImageFrame{TColor, TPacked}"/> class
/// as a copy of an existing frame.
/// </summary>
/// <param name="frame">
/// The frame to create the frame from; its data is copied by the base constructor.
/// </param>
public ImageFrame(ImageFrame<TColor, TPacked> frame)
    : base(frame)
{
}
/// <summary>
/// Returns the tile bitmap for the given index, cropping it from the frame's
/// bitmap on first use and caching it for subsequent calls.
/// </summary>
/// <param name="frame">The frame that owns the tile cache and source bitmap.</param>
/// <param name="tileIndex">Index of the tile within the section grid.</param>
/// <param name="sourceRegion">The region of the frame bitmap to crop for this tile.</param>
/// <returns>The cached or freshly cropped tile bitmap.</returns>
BitmapImage GetTile (ImageFrame frame, int tileIndex, Rectangle sourceRegion)
{
    // Lazily allocate one cache slot per horizontal x vertical section.
    if (frame.TileCache == null) {
        frame.TileCache = new BitmapImage [frame.HorizontalSections.Count * frame.VerticalSections.Count];
    }

    var cached = frame.TileCache [tileIndex];
    if (cached == null) {
        cached = frame.Bitmap.Crop (sourceRegion);
        frame.TileCache [tileIndex] = cached;
    }
    return cached;
}
/// <summary>
/// Applies the process to the specified portion of the specified <see cref="ImageFrame{TPixel}"/> at the specified location
/// and with the specified size.
/// </summary>
/// <param name="source">The source image. Cannot be null.</param>
/// <param name="destination">The cloned/destination image. Cannot be null.</param>
/// <remarks>
/// Implementations are expected to write their result to <paramref name="destination"/>,
/// leaving <paramref name="source"/> unmodified.
/// </remarks>
protected abstract void OnFrameApply(ImageFrame<TPixel> source, ImageFrame<TPixel> destination);
/**
 * Generates, from the raw data supplied by Kinect, a usable array of bytes.
 * The native Kinect data format is complex and depends on many parameters;
 * this function encapsulates and abstracts the conversion.
 *
 * As a side effect, updates the two distance progress bars in the UI with how
 * far the detected player is from the ideal capture distance.
 *
 * @see http://channel9.msdn.com/Series/KinectSDKQuickstarts/Working-with-Depth-Data
 */
private byte[] GenerateColoredBytes(ImageFrame imageFrame)
{
    int height = imageFrame.Image.Height;
    int width = imageFrame.Image.Width;

    //Depth data for each pixel
    Byte[] depthData = imageFrame.Image.Bits;

    //colorFrame contains color information for all pixels in image
    //Height x Width x 4 (Red, Green, Blue, empty byte)
    Byte[] colorFrame = new byte[imageFrame.Image.Height * imageFrame.Image.Width * 4];

    //Bgr32 - Blue, Green, Red, empty byte
    //Bgra32 - Blue, Green, Red, transparency
    //You must set transparency for Bgra as .NET defaults a byte to 0 = fully transparent

    //hardcoded locations to Blue, Green, Red (BGR) index positions
    const int BlueIndex = 0;
    const int GreenIndex = 1;
    const int RedIndex = 2;
    const int AlphaIndex = 3;

    int depthIndex = 0;
    int distance = 0;
    // Starts at 1 (not 0) so the average below never divides by zero when no
    // player pixels are found.
    int nbDistance = 1;
    for (var y = 0; y < height; y++)
    {
        var heightOffset = y * width;

        for (var x = 0; x < width; x++)
        {
            var index = ((x + 0) + heightOffset) * 4;

            // By default the pixel is opaque white. Since the image format is
            // BGRA, all four components of each pixel must be set.
            colorFrame[index + BlueIndex] = 255;
            colorFrame[index + GreenIndex] = 255;
            colorFrame[index + RedIndex] = 255;
            colorFrame[index + AlphaIndex] = 255;

            // If this pixel belongs to a player
            if (GetPlayerIndex(depthData[depthIndex]) > 0)
            {
                // Make the player pixel fully transparent (all components zero)
                // and accumulate its depth for the average-distance computation.
                colorFrame[index + BlueIndex] = 0;
                colorFrame[index + GreenIndex] = 0;
                colorFrame[index + RedIndex] = 0;
                colorFrame[index + AlphaIndex] = 0;
                // Depth is 13 bits spread across two bytes; the low 3 bits of
                // the first byte hold the player index, so shift them out.
                distance += (int)(depthData[depthIndex] >> 3 | depthData[depthIndex + 1] << 5);
                nbDistance++;
            }

            //jump two bytes at a time
            depthIndex += 2;
        }
    }

    // We want to display the deviation from the ideal distance so that all
    // pictures are taken at the same depth (the two progress bars at the top
    // of the application). Compute the actual distance by averaging over all
    // player pixels.
    distance = distance / nbDistance;
    // Deviation from the ideal distance of 2400, clamped to [0, 400] in each direction.
    int distanceTropLoin = Math.Max(0, Math.Min(400, distance - 2400));
    int distanceTropProche = Math.Max(0, Math.Min(400, 2400 - distance));

    // Set the two progress bars relative to the desired ideal distance.
    if (distance != 0)
        distanceLoinProgress.Value = distanceTropLoin;
    else
        distanceLoinProgress.Value = distanceLoinProgress.Maximum;
    distanceProcheProgress.Value = distanceTropProche;

    // Derive the progress-bar color from the larger deviation (roughly a rule of three).
    int distanceCouleur = Math.Max(distanceTropProche, distanceTropLoin);
    System.Windows.Media.Color couleurProgress = new System.Windows.Media.Color();
    couleurProgress.ScR = (float)(distanceCouleur) / 100;
    couleurProgress.ScG = (float)(400 - distanceCouleur) / 800;
    couleurProgress.ScA = 1;
    distanceLoinProgress.Foreground = new SolidColorBrush(couleurProgress);
    distanceProcheProgress.Foreground = new SolidColorBrush(couleurProgress);

    return colorFrame;
}
/// <summary>
/// This method is called after the process is applied, allowing derived classes
/// to perform any post-processing. The base implementation does nothing.
/// </summary>
/// <param name="source">The source image. Cannot be null.</param>
/// <param name="destination">The cloned/destination image. Cannot be null.</param>
protected virtual void AfterFrameApply(ImageFrame<TPixel> source, ImageFrame<TPixel> destination)
{
}
/// <summary>
/// Stores the latest depth frame and forwards its raw bit data to the
/// skeletal-joint converter.
/// </summary>
/// <param name="DepthImage">The new depth image frame from the sensor.</param>
public void UpdateDepth(ImageFrame DepthImage)
{
    gDepthImage = DepthImage;
    gSkeletalJoints.convertDepthFrame(DepthImage.Image.Bits);
}
/// <inheritdoc/>
protected override void OnFrameApply(ImageFrame<TPixel> source)
{
    // Delegate the actual work to a two-pass convolution over the configured kernels.
    using (var passProcessor = new Convolution2PassProcessor<TPixel>(this.Configuration, this.KernelX, this.KernelY, false, this.Source, this.SourceRectangle))
    {
        passProcessor.Apply(source);
    }
}
/// <summary>
/// Renders the image at the given logical size into a WPF RenderTargetBitmap,
/// records it as a new frame and returns the bitmap.
/// </summary>
/// <param name="actx">The application context used for drawing.</param>
/// <param name="scaleFactor">The output scale (e.g. 2 for HiDPI).</param>
/// <param name="width">Logical width in device-independent units.</param>
/// <param name="height">Logical height in device-independent units.</param>
/// <returns>The rendered bitmap.</returns>
ImageSource RenderFrame (ApplicationContext actx, double scaleFactor, double width, double height)
{
    var desc = new ImageDescription {
        Alpha = 1,
        Size = new Size (width, height)
    };

    // Draw into a visual, scaled so logical coordinates map to physical pixels.
    var visual = new SWM.DrawingVisual ();
    using (SWM.DrawingContext dc = visual.RenderOpen ()) {
        dc.PushTransform (new ScaleTransform (scaleFactor, scaleFactor));
        Draw (actx, dc, scaleFactor, 0, 0, desc);
        dc.Pop ();
    }

    var bitmap = new SWMI.RenderTargetBitmap ((int)(width * scaleFactor), (int)(height * scaleFactor), 96, 96, PixelFormats.Pbgra32);
    bitmap.Render (visual);
    AddFrame (new ImageFrame (bitmap, width, height));
    return bitmap;
}
/// <inheritdoc/>
protected override void AfterFrameApply(ImageFrame<TPixel> source, Rectangle sourceRectangle, Configuration configuration)
{
    // Once the main effect has run, darken the frame edges with a vignette.
    var vignette = new VignetteProcessor<TPixel>(VeryDarkGreen);
    vignette.Apply(source, sourceRectangle, configuration);
}