unsafe public static PixelArray<Lumb> Copy(PixelArray<Lumb> dst, GDIDIBSection src)
{
    if (dst.Orientation != src.Orientation)
        return null;

    int imageSize = src.Width * src.Height;

    Lumb* dstPointer = (Lumb*)dst.Pixels.ToPointer();
    BGRb* srcPointer = (BGRb*)src.Pixels.ToPointer();

    for (int counter = 0; counter < imageSize; counter++)
    {
        *dstPointer = new Lumb(NTSCGray.ToLuminance(*srcPointer));
        dstPointer++;
        srcPointer++;
    }

    //for (int row = 0; row < src.Height; row++)
    //    for (int column = 0; column < src.Width; column++)
    //    {
    //        BGRb srcPixel = srcPointer[src.CalculateOffset(column, row)];
    //        byte lum = NTSCGray.ToLuminance(srcPointer[src.CalculateOffset(column, row)]);
    //        dstPointer[dst.CalculateOffset(column, row)] = new Lumb(lum);
    //    }

    return dst;
}
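The BGRb-to-Lumb copies in this collection call NTSCGray.ToLuminance to collapse a color pixel into a single gray value. That helper is not shown in any of these snippets; a minimal sketch, assuming the standard NTSC/Rec. 601 weights and a hypothetical standalone signature, could look like this:

static class NTSCGraySketch
{
    // Y ≈ 0.299·R + 0.587·G + 0.114·B, computed in integer math so the result stays within a byte.
    public static byte ToLuminance(byte red, byte green, byte blue)
    {
        return (byte)((299 * red + 587 * green + 114 * blue) / 1000);
    }
}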
PixelArray<BGRb> CreateSteppedTestImage(int width, int height)
{
    PixelArray<BGRb> picture = new PixelArray<BGRb>(width, height);

    ColorRGBA black = new ColorRGBA(0, 0, 0);
    ColorRGBA white = new ColorRGBA(1, 1, 1);
    ColorRGBA red = new ColorRGBA(1, 0, 0);
    ColorRGBA blue = new ColorRGBA(0, 0, 1);
    ColorRGBA generic = new ColorRGBA(0, 0, 0);

    for (int y = 0; y < picture.Height; y++)
    {
        // Ramp red up and green down as we move down the image.
        // Note the parentheses: without them, "(float)y / picture.Height - 1.0f"
        // evaluates to a negative value for every row.
        generic.Red = (float)y / (picture.Height - 1.0f);
        generic.Green = 1.0f - (float)y / picture.Height;
        //generic.Blue = (float)y / picture.Height - 1.0f;

        for (int x = 0; x < picture.Width; x++)
        {
            //if (x < width / 2)
            //    picture.SetColor(x, y, red);
            //else
            //    picture.SetColor(x, y, white);

            // Ramp blue from left to right.
            generic.Blue = (float)x / (picture.Width - 1.0f);
            picture.SetColor(x, y, generic);
        }
    }

    return picture;
}
public PictureWindow(int x, int y) : base("Targa Viewer", x, y, 320, 600) { //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("marbles.tga"); //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("xing_b24.tga"); //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("xing_b32.tga"); //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("xing_t24.tga"); //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("xing_t32.tga"); //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("ctc24.tga"); picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("flag_t32.tga"); //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("flag_b32.tga"); //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("utc32.tga"); //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("utc24.tga"); //pBuffer = new PixelBuffer(picture); PixelArray<BGRb> picBuffer = new PixelArray<BGRb>(picture.Width, picture.Height, picture.Orientation, new PixelInformation(PixelLayout.Bgr, PixelComponentType.Byte)); picBuffer.ColorAccessor.PixBlt(picture, 0, 0); DCT dct = new DCT(); PixelArray<RGBd> coeff = dct.ForwardDCT(picBuffer); tPic = dct.InverseDCT(coeff); }
public GraphPortChunkDecoder(GDIDIBSection pixMap, CommChannel channel)
{
    fGrayImage = new PixelArray<Lumb>(pixMap.Width, pixMap.Height);

    fChannel = channel;
    fChannel.ReceivedFrameEvent += FrameReceived;

    fPixMap = pixMap;
}
private void Render(PixelArray pixelArray, Scene scene, Camera camera, string name)
{
    var renderer = new Renderer(RenderData, UseExTracer);
    using (new LogTimer($"Render {name}"))
    {
        renderer.Render(pixelArray, camera, scene, UseKdTree);

        string filename = UseExTracer ? $"{name}_scene_ex.png" : $"{name}_scene.png";
        Directory.CreateDirectory(OutputDirectory);
        pixelArray.SaveAsFile(Path.Combine(OutputDirectory, filename));
    }
}
private static async Task Render(IRayTracer rayTracer, RayTraceRenderData renderData, string filePath)
{
    var pixelArray = new PixelArray(renderData.Width, renderData.Height);
    var renderer = new SceneRenderer();
    renderer.Progress += (sender, eventArgs) => Console.Write($"...{eventArgs.PercentComplete}%");

    await renderer.RayTraceSceneAsync(rayTracer.GetPixelColor, pixelArray, renderData.MaxParallelism).ConfigureAwait(false);

    pixelArray.SaveToPng(filePath);
    Console.WriteLine();
    Console.WriteLine($"Saved image to {filePath}");
}
/// <summary>
/// Copy from a Luminance image to the BGRb pixel buffer.
/// </summary>
/// <param name="dst">The destination of the copy.</param>
/// <param name="src">The source of the copy.</param>
unsafe public static GDIDIBSection Copy(GDIDIBSection dst, PixelArray<Lumb> src)
{
    int imageSize = src.Width * src.Height;

    for (int y = 0; y < src.Height; y++)
        for (int x = 0; x < src.Width; x++)
        {
            dst.SetColor(x, y, src.GetColor(x, y));
        }

    return dst;
}
public GraphPortChunkDecoder(GDIDIBSection pixMap, PayloadChannel channel)
{
    fGrayImage = new PixelArray<Lumb>(pixMap.Width, pixMap.Height);

    fChannel = channel;
    if (fChannel != null)
    {
        fChannel.FrameReceivedEvent += FrameReceived;
    }

    fPixMap = pixMap;
}
/// <summary> /// Performs pixel level collision between two masks /// </summary> /// <param name="mask1">The mask1.</param> /// <param name="mask2">The mask2.</param> /// <param name="x">The x.</param> /// <param name="y">The y.</param> /// <returns></returns> public static bool Collision(string mask1, string mask2, float x, float y) { CanvasElement canvas1 = _masks[mask1]; if (x > canvas1.Width || y > canvas1.Height) { return(false); } CanvasElement canvas2 = _masks[mask2]; if (canvas2.Width + x < 0 || canvas2.Height + y < 0) { return(false); } int top = Math.Round(Math.Max(0, y)); int height = Math.Round(Math.Min(canvas1.Height, y + canvas2.Height) - top); int left = Math.Round(Math.Max(0, x)); int width = Math.Round(Math.Min(canvas1.Width, x + canvas2.Width) - left); if (width <= 0 || height <= 0) { return(false); } CanvasElement checkCanvas = (CanvasElement)Document.CreateElement("Canvas"); checkCanvas.Width = width; checkCanvas.Height = height; CanvasContext2D context = (CanvasContext2D)checkCanvas.GetContext(Rendering.Render2D); context.FillStyle = "white"; context.FillRect(0, 0, checkCanvas.Width, checkCanvas.Height); context.CompositeOperation = CompositeOperation.Xor; context.DrawImage(canvas1, left, top, width, height, 0, 0, width, height); context.DrawImage(canvas2, Math.Round(left - x), Math.Round(top - y), width, height, 0, 0, width, height); PixelArray data = context.GetImageData(0, 0, width, height).Data; for (int i = 0; i < data.Length; i += 4) { if ((int)data[i] > 0) { return(true); } } return(false); }
public SnapperWindow(int x, int y, int width, int height) : base("Snap N Share", 10, 10, 640, 480) { // Show a form so we can capture the desired group IP and port number ServerForm groupForm = new ServerForm(); //IPAddress randomAddress = NewTOAPIA.Net.Utility.GetRandomMulticastAddress(); //groupForm.groupAddressField.Text = randomAddress.ToString(); groupForm.ShowDialog(); // Get the address and port from the form fUseGray = groupForm.checkBox1.Checked; string groupIP = groupForm.groupAddressField.Text; int groupPort = int.Parse(groupForm.groupPortField.Text); IPEndPoint ipep = new IPEndPoint(IPAddress.Parse(groupIP), groupPort); // Set our title to the address specified so the user // can easily identify their session. Title = "SnapNShare - " + ipep.ToString(); fSnapper = new ScreenSnapper(); fClientOrigin = new POINT(); int pwidth = ClientRectangle.Width; int pheight = ClientRectangle.Height; fScreenImage = new GDIDIBSection(width, height, BitCount.Bits24); fGrayImage = new PixelArray<Lumb>(width, height,fScreenImage.Orientation, new Lumb()); BackgroundColor = RGBColor.White; this.Opacity = 0.5; // Create the MultiSession object so we can send stuff out to a group //fSession = new MultiSession(Guid.NewGuid().ToString(), ipep); fSession = new MultiSession(ipep, Guid.NewGuid().ToString(), "William", true, true, null); // Add the channel for graphics commands PayloadChannel payloadChannel = fSession.CreateChannel(PayloadType.dynamicPresentation); fCommandDispatcher = new GraphPortChunkEncoder(payloadChannel); fUserIOChannel = fSession.CreateChannel(PayloadType.xApplication2); //fUserIOEncoder = new UserIOChannelEncoder(fUserIOChannel); //fUserIODecoder = new UserIOChannelDecoder(fUserIOChannel); //fUserIODecoder.MouseActivityEvent += new MouseActivityEventHandler(fUserIODecoder_MouseActivityEvent); //fUserIODecoder.KeyboardActivityEvent += new KeyboardActivityEventHandler(fUserIODecoder_KeyboardActivityEvent); // Start the thread that will take snapshots of the screen fGlobalTimer = new PrecisionTimer(); fFrameRate = 2; // Frames per second fSnapperRunning = true; snapperThread = new Thread(RunSnaps); snapperThread.Start(); }
PixelArray<BGRAb> CreateAlphaMask(int width, int height)
{
    PixelArray<BGRAb> alphaMask = new PixelArray<BGRAb>(width, height);

    for (int row = 0; row < height; row++)
        for (int column = 0; column < width; column++)
        {
            BGRAb pixel = alphaMask.RetrievePixel(column, row);
            float fraction = (float)column / copyBuffer.Width;
            pixel.Alpha = (byte)(255 - fraction * 255);
            alphaMask.AssignPixel(column, row, pixel);
        }

    return alphaMask;
}
public Image(int width, int height)
{
    this.width = width;
    this.height = height;
    pixelArray = new PixelArray(width, height);

    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width; x++)
        {
            pixelArray.Add(Color.white);
        }
    }
}
/// <summary>
/// Copy from a Luminance image to the BGRb pixel buffer.
/// </summary>
/// <param name="dst">The destination of the copy.</param>
/// <param name="src">The source of the copy.</param>
unsafe public static GDIDIBSection Copy(GDIDIBSection dst, PixelArray<Lumb> src)
{
    Lumb* srcPointer = (Lumb*)src.Pixels.ToPointer();
    BGRb* dstPointer = (BGRb*)dst.Pixels.ToPointer();

    for (int row = 0; row < src.Height; row++)
        for (int column = 0; column < src.Width; column++)
        {
            Lumb lumPixel = srcPointer[src.CalculateOffset(column, row)];
            byte lum = lumPixel.Lum;
            dstPointer[dst.Accessor.CalculateOffset(column, row)] = new BGRb(lum, lum, lum);
        }

    return dst;
}
// Generalized bit block transfer
public override void PixBlt(PixelArray pixBuff, int x, int y)
{
    // Create a buffer
    // It has to be big enough for the bitmap data, as well as the x, y, and command
    int dataSize = pixBuff.BytesPerRow * pixBuff.Height;
    BufferChunk chunk = new BufferChunk(dataSize + 128);

    // now put the basic command and simple components in
    chunk += GDI32.EMR_BITBLT;
    ChunkUtils.Pack(chunk, x, y);
    ChunkUtils.Pack(chunk, pixBuff.Width, pixBuff.Height);
    chunk += dataSize;

    // Finally, copy in the data
    chunk.CopyFrom(pixBuff.PixelData, dataSize);

    SendCommand(chunk);
}
unsafe public static PixelArray<Lumb> Copy(PixelArray<Lumb> dst, GDIDIBSection src)
{
    if (dst.Orientation != src.Orientation)
        return null;

    int imageSize = src.Width * src.Height;

    Lumb* dstPointer = (Lumb*)dst.Pixels.ToPointer();
    BGRb* srcPointer = (BGRb*)src.Pixels.ToPointer();

    for (int y = 0; y < src.Height; y++)
        for (int x = 0; x < src.Width; x++)
        {
            // Index the source by its row/column offset. The original code read
            // *srcPointer without ever advancing it, so every destination pixel
            // received the luminance of the first source pixel.
            dst.AssignPixel(x, y, new Lumb(NTSCGray.ToLuminance(srcPointer[src.CalculateOffset(x, y)])));
        }

    return dst;
}
public Color GetColorFromClick(ElementEvent e)
{
    ImageElement image = Document.GetElementById<ImageElement>("colorhex");

    CanvasElement canvas = (CanvasElement)Document.CreateElement("canvas");
    canvas.Width = image.Width;
    canvas.Height = image.Height;

    CanvasContext2D ctx = (CanvasContext2D)canvas.GetContext(Rendering.Render2D);
    ctx.DrawImage(image, 0, 0);

    PixelArray pixels = ctx.GetImageData(e.OffsetX, e.OffsetY, 1, 1).Data;
    Color = Color.FromArgb((float)pixels[3], (float)pixels[0], (float)pixels[1], (float)pixels[2]);

    return Color;
}
public SnapperWindow(int x, int y, int width, int height) : base("Pixel Share", 10, 10, 640, 480) { // Show a form so we can capture the desired group IP and port number ServerForm groupForm = new ServerForm(); //IPAddress randomAddress = NewTOAPIA.Net.Utility.GetRandomMulticastAddress(); //groupForm.groupAddressField.Text = randomAddress.ToString(); groupForm.ShowDialog(); // Get the address and port from the form fUseGray = groupForm.checkBox1.Checked; string groupIP = groupForm.groupAddressField.Text; int groupPort = int.Parse(groupForm.groupPortField.Text); IPEndPoint ipep = new IPEndPoint(IPAddress.Parse(groupIP), groupPort); // Set our title to the address specified so the user // can easily identify their session. Title = "SnapNShare - " + ipep.ToString(); fSnapper = new ScreenSnapper(); fClientOrigin = new POINT(); int pwidth = ClientRectangle.Width; int pheight = ClientRectangle.Height; fScreenImage = new GDIDIBSection(width, height, BitCount.Bits24); fGrayImage = new PixelArray<Lumb>(width, height,fScreenImage.Orientation, new Lumb()); BackgroundColor = RGBColor.White; this.Opacity = 0.5; // Add the channel for graphics commands CommChannel graphicsChannel = new CommChannel(ipep, true, false); fCommandDispatcher = new GraphPortChunkEncoder(graphicsChannel); // Start the thread that will take snapshots of the screen fGlobalTimer = new PrecisionTimer(); fFrameRate = 2; // Frames per second fSnapperRunning = true; snapperThread = new Thread(RunSnaps); snapperThread.Start(); }
GraphPortChunkEncoder fCommandDispatcher;   // Used to do PixBlt to the net
#endregion

public DesktopCapture(ConferenceAttendee attendee)
{
    fAttendee = attendee;
    fContext = GDIContext.CreateForDefaultDisplay();
    //fCommandDispatcher = new GraphPortChunkEncoder(attendee);

    int width = 800;
    int height = 600;
    fResolution = new Resolution(width, height);

    fScreenImage = new GDIDIBSection(width, height);
    fGrayImage = new PixelArray<Lumb>(width, height, fScreenImage.Orientation, new Lumb());

    // Start the thread that will take snapshots of the screen
    fGlobalTimer = new PrecisionTimer();
    fFrameRate = 10;    // Frames per second
    fSnapperRunning = true;
    fSnapperThread = new Thread(RunSnaps);

    // WAA
    //fSnapperThread.Start();
}
public RenderService()
{
    _renderDataModel = new RenderDataModel();

    _scene = SceneFactory.CreateBasicScene();
    _camera = new Camera(
        new PosVector(7.5, 7.5, 2.3),
        new PosVector(0.0, 0.0, 0.0),
        new PosVector(0.0, 0.0, 1.0),
        50.0);

    _renderer = new Renderer(_renderData, false);
    _pixelArray = new PixelArray(_renderData.Width, _renderData.Height);

    Task.Run(() =>
    {
        while (true)
        {
            _renderer.Render(_pixelArray, _camera, _scene, true);
        }
    });
}
void SnapClientArea()
{
    Rectangle cRect = new Rectangle(new Point(0, 0), new Size(fResolution.Columns, fResolution.Rows));

    // If we have resized since last picture, then
    // resize the capture buffer before taking
    // the next snapshot.
    if (fNeedsResize)
    {
        fScreenImage = new GDIDIBSection(cRect.Width, cRect.Height);
        fGrayImage = new PixelArray<Lumb>(cRect.Width, cRect.Height, fScreenImage.Orientation, new Lumb());

        fNeedsResize = false;
    }

    // Now we actually take the snapshot.
    // We pass in the client area, based in screen coordinates,
    // and the PixelBuffer object to capture into.
    SnapAPicture(new Rectangle(0, 0, fScreenImage.Width, fScreenImage.Height), fScreenImage);
}
public virtual void PixmapShardBlt(PixelArray pixmap, Rectangle srcBoundary, Rectangle destinationRect)
{
    int srcX = srcBoundary.X;
    int srcY = srcBoundary.Y;
    int width = srcBoundary.Width;
    int height = srcBoundary.Height;

    int dstX = destinationRect.X;
    int dstY = destinationRect.Y;

    // Now we have srcRect representing the fraction of the pixArray that we want to display.
    // It's been clipped to the boundary of the IPixelArray that we're holding onto.

    // 2. Copy the pixels from the source to our destination
    // Perform a simple color copy
    for (int column = 0; column < width; column++)
        for (int row = 0; row < height; row++)
        {
            //ColorRGBA aColor = pixmap.GetColor(column + srcX, row + srcY);
            //fDstAccess.SetColor(column + dstX, row + dstY, aColor);
        }
}
public virtual void PixBlt(PixelArray pixArray, int x, int y)
{
    // 1. Calculate the intersection of the intended destination rectangle
    // of the pixArray and the boundary of the pixelArray we're holding onto.
    Rectangle srcRect = new Rectangle(x, y, pixArray.Width, pixArray.Height);

    // Create the boundary rectangle for our destination
    Rectangle dstRect = new Rectangle(0, 0, fDstAccess.Width, fDstAccess.Height);

    // Create the intersection of the dstRect and the srcRect
    srcRect.Intersect(dstRect);

    // If there is no intersection, then just return
    if (srcRect.IsEmpty)
        return;

    Rectangle srcBoundary = srcRect;
    srcBoundary.Offset(-x, -y);

    PixmapShardBlt(pixArray, srcBoundary, srcRect);
}
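To see how the clipping in PixBlt plays out, suppose a 100x100 pixArray is blitted at (-20, 10) onto a 64x64 destination. The source rectangle (-20, 10, 100, 100) intersected with (0, 0, 64, 64) gives (0, 10, 64, 54); offsetting that by (20, -10) yields a srcBoundary of (20, 0, 64, 54), so PixmapShardBlt copies the 64x54 region starting at source column 20, row 0 onto the destination starting at (0, 10).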
public PictureWindow(int locx, int locy)
    : base("Run Length Encoding Viewer", locx, locy, 640, 480)
{
    //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("marbles.tga");
    //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("xing_b24.tga");
    //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("xing_b32.tga");
    //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("xing_t24.tga");
    //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("xing_t32.tga");
    //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("ctc24.tga");
    //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("flag_t32.tga");
    //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("flag_b32.tga");
    //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("utc32.tga");
    //picture = NewTOAPIA.Drawing.TargaLoader.CreatePixelDataFromFile("utc24.tga");

    //picture = CreateColorTestImage(40,40);
    //picture = CreateSteppedTestImage(128, 32);
    picture = CreateBarsTestImage(10);

    // Perform the runlength encoding on the image
    pixMap = new PixelArray<BGRb>(picture);

    RLC rlc = new RLC();
    //NewTOAPIA.Imaging.RunLengthCodec rlc = new RunLengthCodec();
    MemoryStream ms = new MemoryStream();
    rlc.Encode(new PixelAccessorBGRb(pixMap), ms);
    byte[] imageBytes = ms.GetBuffer();
    //Console.WriteLine("Image Bytes: {0}", imageBytes.Length);

    // Now decode the bytes back into a new image
    ms.Seek(0, SeekOrigin.Begin);
    tPic = new PixelArray<BGRb>(pixMap.Width, pixMap.Height, pixMap.Orientation,
        new PixelInformation(PixelLayout.Bgr, PixelComponentType.Byte));
    PixelAccessorBGRb accessor = new PixelAccessorBGRb(tPic);
    rlc.Decode(ms, accessor);
}
public void PickColor(ElementEvent e)
{
    DivElement picker = Document.GetElementById<DivElement>("colorpicker");
    ImageElement image = Document.GetElementById<ImageElement>("colorhex");

    CanvasElement canvas = (CanvasElement)Document.CreateElement("canvas");
    canvas.Width = image.Width;
    canvas.Height = image.Height;

    CanvasContext2D ctx = (CanvasContext2D)canvas.GetContext(Rendering.Render2D);
    ctx.DrawImage(image, 0, 0);

    PixelArray pixels = ctx.GetImageData(e.OffsetX, e.OffsetY, 1, 1).Data;
    Color = Color.FromArgb((float)pixels[3], (float)pixels[0], (float)pixels[1], (float)pixels[2]);

    if (CallBack != null)
    {
        CallBack(Color);
    }
}
public void Scale(int newWidth, int newHeight)
{
    float xScale = (float)newWidth / (float)(width - 1);
    float yScale = (float)newHeight / (float)(height - 1);

    var newPixelArray = new PixelArray(newWidth, newHeight);

    for (int y = 0; y < newHeight; y++)
    {
        for (int x = 0; x < newWidth; x++)
        {
            var color = GetPixel(
                (int)(1 + x / xScale),
                (int)(1 + y / yScale)
            );
            newPixelArray.Add(color);
        }
    }

    width = newWidth;
    height = newHeight;
    pixelArray = newPixelArray;
}
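This Scale is a nearest-neighbour resize: each destination pixel (x, y) samples the source at roughly (x / xScale, y / yScale). Working through the numbers for a 100-pixel-wide row doubled to newWidth = 200: xScale = 200 / 99, about 2.02, so destination column 0 samples source column (int)(1 + 0) = 1 and destination column 199 samples source column (int)(1 + 98.5) = 99; with the +1 offset, source column 0 is never sampled.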
public static IPixelArray CreatePixelDataFromFile(string filename) { int tgaSize; bool isExtendedFile; // Open the file. if ((null == filename) || (string.Empty == filename)) return null; FileStream filestream = new FileStream(filename, FileMode.Open, FileAccess.Read); BinaryReader reader = new BinaryReader(filestream); // Targa images come in many different formats, and there are a couple of different versions // of the specification. // First thing to do is determine if the file is adhereing to version 2.0 of the spcification. // We do that by reading a 'footer', which is the last 26 bytes of the file. long fileLength = filestream.Length; long seekPosition = filestream.Seek(fileLength - 26, SeekOrigin.Begin); byte[] targaFooterBytes = reader.ReadBytes(26); string targaXFileID = "TRUEVISION-XFILE"; string targaFooterSignature = System.Text.ASCIIEncoding.ASCII.GetString(targaFooterBytes, 8, 16); TargaFooter footer = null; isExtendedFile = (0 == string.Compare(targaFooterSignature, targaXFileID)); if (isExtendedFile) { // Since we now know it's an extended file, // we'll create the footer object and fill // in the details. footer = new TargaFooter(); // Of the 26 bytes we read from the end of the file // the bytes are layed out as follows. //Bytes 0-3: The Extension Area Offset //Bytes 4-7: The Developer Directory Offset //Bytes 8-23: The Signature //Byte 24: ASCII Character “.” //Byte 25: Binary zero string terminator (0x00) // We take those raw bytes, and turn them into meaningful fields // in the footer object. footer.ExtensionAreaOffset = BitConverter.ToInt32(targaFooterBytes, 0); footer.DeveloperDirectoryOffset = BitConverter.ToInt32(targaFooterBytes, 4); footer.Signature = targaFooterSignature; footer.Period = (byte)'.'; footer.BinaryZero = 0; } TargaHeader fHeader = new TargaHeader(); // If you want to use unsafe code, you could do the following // there are two primary drawbacks. // 1. The targa image data is in Little-Endian format. On platforms // that are big-endian, the data will not necessarily show up correctly in the header. // 2. You must compile with unsafe code. That may or may not be a problem depending // on the application, but it really isn't a necessity. // // The speed gain may not be realized, so there's really no reason for the added complexity. // First, just reader enough of bytes from the file to capture the // header into a chunk of memory. //byte[] headerBytes = reader.ReadBytes(Marshal.SizeOf(fHeader)); //IntPtr headerPtr; //unsafe //{ // GCHandle dataHandle = GCHandle.Alloc(headerBytes, GCHandleType.Pinned); // try // { // headerPtr = (IntPtr)dataHandle.AddrOfPinnedObject(); // // Now use the marshaller to copy the header bytes into a structure // fHeader = (TargaHeader)Marshal.PtrToStructure(headerPtr, typeof(TargaHeader)); // } // finally // { // dataHandle.Free(); // } //} filestream.Seek(0, SeekOrigin.Begin); fHeader.IDLength = reader.ReadByte(); fHeader.ColorMapType = (TargaColorMapType)reader.ReadByte(); fHeader.ImageType = (TargaImageType)reader.ReadByte(); fHeader.CMapStart = reader.ReadInt16(); fHeader.CMapLength = reader.ReadInt16(); fHeader.CMapDepth = reader.ReadByte(); // Image description fHeader.XOffset = reader.ReadInt16(); fHeader.YOffset = reader.ReadInt16(); fHeader.Width = reader.ReadInt16(); // Width of image in pixels fHeader.Height = reader.ReadInt16(); // Height of image in pixels fHeader.PixelDepth = reader.ReadByte(); // How many bits per pixel fHeader.ImageDescriptor = reader.ReadByte(); // Image Descriptor Byte. 
| // Bits 3-0 - number of attribute bits associated with each | // pixel. For the Targa 16, this would be 0 or | // 1. For the Targa 24, it should be 0. For | // Targa 32, it should be 8. | // Bit 4 - controls left/right transfer of pixels to /// the screen. /// 0 = left to right /// 1 = right to left // Bit 5 - controls top/bottom transfer of pixels to /// the screen. /// 0 = bottom to top /// 1 = top to bottom /// /// In Combination bits 5/4, they would have these values /// 00 = bottom left /// 01 = bottom right /// 10 = top left /// 11 = top right /// // Bits 7-6 - Data storage interleaving flag. | // 00 = non-interleaved. | // 01 = two-way (even/odd) interleaving. | // 10 = four way interleaving. | // 11 = reserved. byte desc = fHeader.ImageDescriptor; byte attrBits = (byte)(desc & 0x0F); ImageOrigin origin = (ImageOrigin)(desc & (byte)ImageOrigin.OriginMask); byte interleave = (byte)((desc & 0xC0) >> 6); // This routine can only deal with the uncompressed image types. // So, fail if this is not the case. if ((TargaImageType.TrueColor != fHeader.ImageType) && (TargaImageType.Monochrome != fHeader.ImageType)) { filestream.Close(); return null; } PixmapOrientation pixmapOrientation = PixmapOrientation.BottomToTop; if ((ImageOrigin.BottomLeft == origin) || (ImageOrigin.BottomRight == origin)) pixmapOrientation = PixmapOrientation.BottomToTop; else pixmapOrientation = PixmapOrientation.TopToBottom; int bytesPerPixel = fHeader.PixelDepth / 8; // Skip past the Image Identification field byte[] ImageIdentification; if (fHeader.IDLength > 0) ImageIdentification = reader.ReadBytes(fHeader.IDLength); // calculate image size based on bytes per pixel, width and height. tgaSize = fHeader.Width * fHeader.Height * bytesPerPixel; byte[] imageData = reader.ReadBytes((int)tgaSize); filestream.Close(); // Pin the array in mememory, and get a data pointer to it IntPtr dataPtr; unsafe { GCHandle dataHandle = GCHandle.Alloc(imageData, GCHandleType.Pinned); dataPtr = (IntPtr)dataHandle.AddrOfPinnedObject(); // Create the appropriate PixelArray // then create an accessor to match // Copy the data from the buffer pointer to the new array switch (bytesPerPixel) { case 3: { PixelAccessorBGRb srcAccess = new PixelAccessorBGRb(fHeader.Width, fHeader.Height, pixmapOrientation, dataPtr); PixelArray<BGRb> pixmap = new PixelArray<BGRb>(srcAccess); return pixmap; } break; case 4: { PixelAccessorBGRAb srcAccess = new PixelAccessorBGRAb(fHeader.Width, fHeader.Height, pixmapOrientation, dataPtr); PixelArray<BGRAb> pixmap = new PixelArray<BGRAb>(srcAccess); return pixmap; } break; case 1: { PixelAccessorLumb srcAccess = new PixelAccessorLumb(fHeader.Width, fHeader.Height, pixmapOrientation, dataPtr); PixelArray<Lumb> pixmap = new PixelArray<Lumb>(srcAccess); return pixmap; } break; } } return null; }
/* Fast DCT algorithm due to Arai, Agui, Nakajima
 * Implementation due to Tim Kientzle
 */
public void ForwardDCT(PixelArray<BGRb> tga, double[,] data, int xpos, int ypos)
{
    int i;
    int[,] rows = new int[8, 8];

    const int c1 = 1004 /* cos(pi/16) << 10 */,
        s1 = 200 /* sin(pi/16) */,
        c3 = 851 /* cos(3pi/16) << 10 */,
        s3 = 569 /* sin(3pi/16) << 10 */,
        r2c6 = 554 /* sqrt(2)*cos(6pi/16) << 10 */,
        r2s6 = 1337 /* sqrt(2)*sin(6pi/16) << 10 */,
        r2 = 181; /* sqrt(2) << 7 */

    int x0, x1, x2, x3, x4, x5, x6, x7, x8;

    /* transform rows */
    for (i = 0; i < 8; i++)
    {
        x0 = pixel(tga, xpos + 0, ypos + i);
        x1 = pixel(tga, xpos + 1, ypos + i);
        x2 = pixel(tga, xpos + 2, ypos + i);
        x3 = pixel(tga, xpos + 3, ypos + i);
        x4 = pixel(tga, xpos + 4, ypos + i);
        x5 = pixel(tga, xpos + 5, ypos + i);
        x6 = pixel(tga, xpos + 6, ypos + i);
        x7 = pixel(tga, xpos + 7, ypos + i);

        /* Stage 1 */
        x8 = x7 + x0; x0 -= x7;
        x7 = x1 + x6; x1 -= x6;
        x6 = x2 + x5; x2 -= x5;
        x5 = x3 + x4; x3 -= x4;

        /* Stage 2 */
        x4 = x8 + x5; x8 -= x5;
        x5 = x7 + x6; x7 -= x6;
        x6 = c1 * (x1 + x2); x2 = (-s1 - c1) * x2 + x6; x1 = (s1 - c1) * x1 + x6;
        x6 = c3 * (x0 + x3); x3 = (-s3 - c3) * x3 + x6; x0 = (s3 - c3) * x0 + x6;

        /* Stage 3 */
        x6 = x4 + x5; x4 -= x5;
        x5 = r2c6 * (x7 + x8); x7 = (-r2s6 - r2c6) * x7 + x5; x8 = (r2s6 - r2c6) * x8 + x5;
        x5 = x0 + x2; x0 -= x2;
        x2 = x3 + x1; x3 -= x1;

        /* Stage 4 and output */
        // 'rows' is a rectangular array, so it must be indexed as rows[i, j];
        // the original jagged-style rows[i][j] would not compile.
        rows[i, 0] = x6;
        rows[i, 4] = x4;
        rows[i, 2] = x8 >> 10;
        rows[i, 6] = x7 >> 10;
        rows[i, 7] = (x2 - x5) >> 10;
        rows[i, 1] = (x2 + x5) >> 10;
        rows[i, 3] = (x3 * r2) >> 17;
        rows[i, 5] = (x0 * r2) >> 17;
    }

    /* transform columns */
    for (i = 0; i < 8; i++)
    {
        x0 = rows[0, i];
        x1 = rows[1, i];
        x2 = rows[2, i];
        x3 = rows[3, i];
        x4 = rows[4, i];
        x5 = rows[5, i];
        x6 = rows[6, i];
        x7 = rows[7, i];

        /* Stage 1 */
        x8 = x7 + x0; x0 -= x7;
        x7 = x1 + x6; x1 -= x6;
        x6 = x2 + x5; x2 -= x5;
        x5 = x3 + x4; x3 -= x4;

        /* Stage 2 */
        x4 = x8 + x5; x8 -= x5;
        x5 = x7 + x6; x7 -= x6;
        x6 = c1 * (x1 + x2); x2 = (-s1 - c1) * x2 + x6; x1 = (s1 - c1) * x1 + x6;
        x6 = c3 * (x0 + x3); x3 = (-s3 - c3) * x3 + x6; x0 = (s3 - c3) * x0 + x6;

        /* Stage 3 */
        x6 = x4 + x5; x4 -= x5;
        x5 = r2c6 * (x7 + x8); x7 = (-r2s6 - r2c6) * x7 + x5; x8 = (r2s6 - r2c6) * x8 + x5;
        x5 = x0 + x2; x0 -= x2;
        x2 = x3 + x1; x3 -= x1;

        /* Stage 4 and output */
        // 'data' is likewise rectangular, so it is indexed as data[row, i].
        data[0, i] = (double)((x6 + 16) >> 3);
        data[4, i] = (double)((x4 + 16) >> 3);
        data[2, i] = (double)((x8 + 16384) >> 13);
        data[6, i] = (double)((x7 + 16384) >> 13);
        data[7, i] = (double)((x2 - x5 + 16384) >> 13);
        data[1, i] = (double)((x2 + x5 + 16384) >> 13);
        data[3, i] = (double)(((x3 >> 8) * r2 + 8192) >> 12);
        data[5, i] = (double)(((x0 >> 8) * r2 + 8192) >> 12);
    }
}
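The fixed-point constants in this Arai-Agui-Nakajima kernel check out against their comments: cos(π/16) ≈ 0.9808, so 0.9808 × 2¹⁰ ≈ 1004; sin(π/16) ≈ 0.1951 gives 200 (also scaled by 2¹⁰, despite the shorter comment); cos(3π/16) ≈ 0.8315 gives 851; sin(3π/16) ≈ 0.5556 gives 569; √2·cos(6π/16) ≈ 0.5412 gives 554; √2·sin(6π/16) ≈ 1.3066 gives 1337 (truncated rather than rounded); and √2 × 2⁷ ≈ 181.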
PixelArray<BGRb> CreateBarsTestImage(int maxBar)
{
    int width = 128;
    int height = 128;

    PixelArray<BGRb> picture = new PixelArray<BGRb>(width, height);

    ColorRGBA black = new ColorRGBA(0, 0, 0);
    ColorRGBA white = new ColorRGBA(1, 1, 1);
    ColorRGBA red = new ColorRGBA(1, 0, 0);
    ColorRGBA green = new ColorRGBA(0, 1, 0);
    ColorRGBA blue = new ColorRGBA(0, 0, 1);
    ColorRGBA generic = new ColorRGBA(0, 0, 0);

    for (int y = 0; y < picture.Height; y++)
    {
        int x = 0;
        for (int bars = 1; bars <= maxBar; bars++)
        {
            for (int column = 0; column < bars; column++)
            {
                picture.SetColor(x + column, y, red);
            }
            x += bars + 4;

            //picture.ColorAccessor.SetColor(x, y, generic);
            //generic.Blue = (float)x / picture.Width - 1.0f;
        }

        //generic.Red = (float)y / picture.Height - 1.0f;
        //generic.Green = 1.0f - (float)y / picture.Height;
        //generic.Blue = (float)y / picture.Height - 1.0f;
    }

    return picture;
}
//IntPtr fUserIODecoder_KeyboardActivityEvent(object sender, KeyboardActivityArgs ke) //{ // if (!fAllowRemoteControl) // return IntPtr.Zero; // Console.WriteLine("Received Key Event: {0} {1} {2}", InputSimulator.KeyEvents, ke.AcitivityType, ke.VirtualKeyCode); // InputSimulator.SimulateKeyboardActivity(ke.VirtualKeyCode, ke.AcitivityType); // return IntPtr.Zero; //} //void fUserIODecoder_MouseActivityEvent(object sender, MouseActivityArgs me) //{ // if (fAllowRemoteControl) // { // // What we've received are window relative coordinates. // // First we need to convert them to screen relative coordinates // POINT aPoint = new POINT(me.X, me.Y); // User32.ClientToScreen(Handle, ref aPoint); // // Now for input simulation, we need to turn the screen // // point into a normalized range of 0 to 65535 // // Normalize the point // Screen myScreen = Screen.FromHandle(Handle); // Rectangle screenRect = myScreen.Bounds; // float xFrac = (float)aPoint.X / screenRect.Width; // float yFrac = (float)aPoint.Y / screenRect.Height; // int normalizedX = (int)(xFrac * 65535); // int normalizedY = (int)(yFrac * 65535); // // And finally, send the input // InputSimulator.SimulateMouseActivity(normalizedX, normalizedY, me.Delta, me.ButtonActivity); // } //} #endregion void SnapClientArea() { Rectangle cRect = ClientRectangle; // If we have resized since last picture, then // resize the capture buffer before taking // the next snapshot. if (fNeedsResize) { fScreenImage = new GDIDIBSection(cRect.Width, cRect.Height, BitCount.Bits24); fGrayImage = new PixelArray<Lumb>(cRect.Width, cRect.Height, fScreenImage.Orientation, new Lumb()); fNeedsResize = false; } // To take a snapshot, we need to convert the client area's upper left corner // to screen space, as the device context we're using is for the whole screen. // So, we get the origin, and make the User32 call to convert that to screen space. fClientOrigin = new POINT(0, 0); User32.ClientToScreen(Handle, ref fClientOrigin); // Now we actually take the snapshot. // We pass in the client area, based in screen coordinates // and the PixelBuffer object to capture into. fSnapper.SnapAPicture(new Rectangle(fClientOrigin.X, fClientOrigin.Y, fScreenImage.Width, fScreenImage.Height), fScreenImage); }
public virtual void ReceiveChunk(BufferChunk aRecord) { // First read out the record type int recordType = aRecord.NextInt32(); // Then deserialize the rest from there switch (recordType) { //case (int)UICommands.PixBlt: // { // // Get the X, Y // int x = aRecord.NextInt32(); // int y = aRecord.NextInt32(); // // get the length of the image bytes buffer // int dataSize = aRecord.NextInt32(); // byte[] imageBytes = (byte[])aRecord; // // Create a memory stream from the imageBytes // MemoryStream ms = new MemoryStream(imageBytes, false); // // Create a pixelArray from the stream // Bitmap bm = (Bitmap)Bitmap.FromStream(ms); // PixelArray<BGRAb> pixMap = PixelBufferHelper.CreatePixelArrayFromBitmap(bm); // // And finally, call the PixBlt function // PixBltBGRAb(pixMap, x, y); // } // break; case (int)UICommands.PixBltRLE: { // Get the X, Y int x = aRecord.NextInt32(); int y = aRecord.NextInt32(); int width = aRecord.NextInt32(); int height = aRecord.NextInt32(); // get the length of the image bytes buffer int dataSize = aRecord.NextInt32(); byte[] imageBytes = (byte[])aRecord; // Create a memory stream from the imageBytes MemoryStream ms = new MemoryStream(imageBytes, false); // Create a pixelArray from the stream if ((width != fPixMap.Width) || (height != fPixMap.Height)) fPixMap = new GDIDIBSection(width, height); TargaRunLengthCodec rlc = new TargaRunLengthCodec(); PixelAccessorBGRb accessor = new PixelAccessorBGRb(fPixMap.Width, fPixMap.Height, fPixMap.Orientation, fPixMap.Pixels, fPixMap.BytesPerRow); rlc.Decode(ms, accessor); // And finally, call the local PixBlt function PixBltPixelBuffer24(fPixMap, x, y); } break; case (int)UICommands.PixBltLuminance: { // Get the X, Y int x = aRecord.NextInt32(); int y = aRecord.NextInt32(); int width = aRecord.NextInt32(); int height = aRecord.NextInt32(); // get the length of the image bytes buffer int dataSize = aRecord.NextInt32(); byte[] imageBytes = (byte[])aRecord; // Create a memory stream from the imageBytes MemoryStream ms = new MemoryStream(imageBytes, false); // Create a pixelArray from the stream if ((width != fGrayImage.Width) || (height != fGrayImage.Height)) fGrayImage = new PixelArray<Lumb>(width, height); TargaLuminanceRLE rlc = new TargaLuminanceRLE(); rlc.Decode(ms, fGrayImage); // And finally, call the local PixBlt function PixBltLumb(fGrayImage, x, y); } break; case (int)UICommands.Showcursor: { ShowCursor(); } break; case (int)UICommands.HideCursor: { HideCursor(); } break; case (int)UICommands.MoveCursor: { int x = aRecord.NextInt32(); int y = aRecord.NextInt32(); MoveCursor(x, y); } break; default: //if (CommandReceived != null) // CommandReceived(aRecord); break; } }
public static byte[] CompressVertical(AccessibleBitmap source)
{
    byte[] lastpixel = null;                            // Stores the last pixel seen
    int colorCounter = 1;                               // Counter for the current color run
    BitStreamFIFO bs = new BitStreamFIFO();             // Bitstream that collects all the bits
    int maxCount = 0;                                   // Stores the maximum run length
    Queue<PixelArray> output = new Queue<PixelArray>(); // Queue that stores all the pixel runs

    // Write one bit to the bitstream, so the decompressor knows to decompress vertically
    bs.Write(true);

    // Iterate through every vertical row
    for (int x = 0; x < source.width; x++)
    {
        // Iterate through every pixel in the vertical row
        for (int y = 0; y < source.height; y++)
        {
            // Check if the variable lastpixel is empty
            if (lastpixel == null)
            {
                // If lastpixel is empty, set last pixel to the first pixel
                lastpixel = source.GetPixel(x, y);
            }
            else
            {
                // If lastpixel isn't empty, compare last pixel with new pixel
                if (lastpixel.SequenceEqual(source.GetPixel(x, y)))
                {
                    // Pixels matched, so increase the counter value
                    colorCounter++;
                }
                else
                {
                    // If the pixels don't match, add the counter with the last pixel to the output queue
                    output.Enqueue(new PixelArray(colorCounter, lastpixel));

                    // If the new counter value is higher than the previous maximum, record it in maxCount
                    if (colorCounter > maxCount)
                        maxCount = colorCounter;

                    // Reset the colorCounter and set the last pixel to the new pixel
                    colorCounter = 1;
                    lastpixel = source.GetPixel(x, y);
                }
            }
        }
    }

    // Add the remaining pixel(s) to the output queue
    output.Enqueue(new PixelArray(colorCounter, lastpixel));

    // If the final counter value is higher than the previous maximum, record it in maxCount
    if (colorCounter > maxCount)
        maxCount = colorCounter;

    // Write the number of bits needed to encode maxCount to the bitstream
    bs.Write((byte)Math.Ceiling(Math.Log(maxCount, 2)));

    // Add all the runs from the queue to the bitstream
    while (output.Count > 0)
    {
        PixelArray pixel = output.Dequeue();
        bs.Write(pixel.Count, (int)Math.Ceiling(Math.Log(maxCount, 2)));
        bs.Write(pixel.Pixel);
    }

    // Return the bitstream as a byte[]
    return bs.ToByteArray();
}
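As a concrete example of the header written above: if the longest run in the image is 300 identical pixels, ceil(log₂ 300) = 9, so the single byte 9 is written after the orientation bit, and every run that follows is encoded as a 9-bit count packed next to its pixel value.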
/// <summary> /// Compute the Inverse DCT /// </summary> /// <param name="dctBuffer">F(u, v); DCT coefficients</param> /// <param name="fBasis">DCT basis functions</param> /// <param name="imgBuffer">f(x, y); destination image</param> public PixelArray InverseDCT(PixelArray dctBuffer) { //int N = fBlockSize; // extract the dimensions of the DCT buffer int width = dctBuffer.Width; int height = dctBuffer.Height; PixelArray imgBuffer = new PixelArray(width, height, PixelType.BGRb, PixmapOrientation.TopToBottom, 1); // intermediate result (an NxN block) PixelArray dctBlock = new PixelArray(fBlockSize, fBlockSize, PixelType.RGBd, PixmapOrientation.TopToBottom, 1); // // for every NxN block of the coefficients // for (int y = 0; y < height; y += fBlockSize) { for (int x = 0; x < width; x += fBlockSize) { // // do the first matrix multiplication // ([fBasis][dctBuffer]) // and store the results in dctBlock // for (int col = 0; col < fBlockSize; ++col) { for (int row = 0; row < fBlockSize; ++row) { RGBd result = new RGBd(0.0, 0.0, 0.0); for (int index = 0; index < fBlockSize; ++index) { RGBd coeff = new RGBd(); coeff.SetBytes(dctBuffer.GetPixelBytes(x + index, y + row)); double cosVal = fBasis[col, index]; result.red += cosVal * coeff.Red; result.green += cosVal * coeff.Green; result.blue += cosVal * coeff.Blue; } dctBlock.SetPixel(col, row, result); } } // // do the second matrix multiplication // ([dctBlock][fBasis]') // and store the results in imgBuffer // for (int col2 = 0; col2 < fBlockSize; ++col2) { for (int row2 = 0; row2 < fBlockSize; ++row2) { RGBd result = new RGBd(0.0, 0.0, 0.0); for (int index = 0; index < fBlockSize; ++index) { RGBd coeff = new RGBd(); coeff.SetBytes(dctBlock.GetPixelBytes(col2, index)); double cosVal_transpose = fBasis[row2, index]; result.red += coeff.Red * cosVal_transpose; result.green += coeff.Green * cosVal_transpose; result.blue += coeff.Blue * cosVal_transpose; } // // assign the computed value back to the image // byte red = (byte)Math.Floor(result.Red + 0.5); byte green = (byte)Math.Floor(result.Green + 0.5); byte blue = (byte)Math.Floor(result.Blue + 0.5); BGRb new_pixel = new BGRb(red, green, blue); imgBuffer.SetPixel(x + col2, y + row2, new_pixel); } } } } return imgBuffer; }
public virtual void PixBltLum24(PixelArray<Lumb> pixMap, int x, int y) { //fBackingGraphPort.PixBlt(pixMap, x, y); }
public virtual void PixBltLumb(PixelArray<Lumb> pixBuff, int x, int y) { if (null != PixBltLumbHandler) PixBltLumbHandler(pixBuff, x, y); }
public static IPixelArray CreatePixelArrayFromFileStream(FileStream filestream) { int tgaSize; bool isExtendedFile; long fileLength = filestream.Length; // We'll use a binary reader to make it easier // to get at the specific data types BinaryReader reader = new BinaryReader(filestream); // Targa images come in many different formats, and there are a couple of different versions // of the specification. // First thing to do is determine if the file is adhereing to version 2.0 of the spcification. // We do that by reading a 'footer', which is the last 26 bytes of the file. string targaXFileID = "TRUEVISION-XFILE"; TargaFooter footer = null; // Get the last 26 bytes of the file so we can see if the signature // is in there. long seekPosition = filestream.Seek(fileLength - 26, SeekOrigin.Begin); byte[] targaFooterBytes = reader.ReadBytes(26); string targaFooterSignature = System.Text.ASCIIEncoding.ASCII.GetString(targaFooterBytes, 8, 16); // If the strings compare favorably, then we have a match for an extended // TARGA file type. isExtendedFile = (0 == string.Compare(targaFooterSignature, targaXFileID)); if (isExtendedFile) { // Since we now know it's an extended file, // we'll create the footer object and fill // in the details. footer = new TargaFooter(); // Of the 26 bytes we read from the end of the file // the bytes are layed out as follows. //Bytes 0-3: The Extension Area Offset //Bytes 4-7: The Developer Directory Offset //Bytes 8-23: The Signature //Byte 24: ASCII Character “.” //Byte 25: Binary zero string terminator (0x00) // We take those raw bytes, and turn them into meaningful fields // in the footer object. footer.ExtensionAreaOffset = BitConverter.ToInt32(targaFooterBytes, 0); footer.DeveloperDirectoryOffset = BitConverter.ToInt32(targaFooterBytes, 4); footer.Signature = targaFooterSignature; footer.Period = (byte)'.'; footer.BinaryZero = 0; } // Now create the header that we'll fill int TargaHeader fHeader = new TargaHeader(); // Go back to the beginning of the file first filestream.Seek(0, SeekOrigin.Begin); fHeader.IDLength = reader.ReadByte(); fHeader.ColorMapType = (TargaColorMapType)reader.ReadByte(); fHeader.ImageType = (TargaImageType)reader.ReadByte(); fHeader.CMapStart = reader.ReadInt16(); fHeader.CMapLength = reader.ReadInt16(); fHeader.CMapDepth = reader.ReadByte(); // Image description fHeader.XOffset = reader.ReadInt16(); fHeader.YOffset = reader.ReadInt16(); fHeader.Width = reader.ReadInt16(); // Width of image in pixels fHeader.Height = reader.ReadInt16(); // Height of image in pixels fHeader.PixelDepth = reader.ReadByte(); // How many bits per pixel fHeader.ImageDescriptor = reader.ReadByte(); /// The single byte that is the ImageDescriptor contains the following /// information. // Bits 3-0 - number of attribute bits associated with each | // pixel. For the Targa 16, this would be 0 or | // 1. For the Targa 24, it should be 0. For | // Targa 32, it should be 8. | // Bit 4 - controls left/right transfer of pixels to /// the screen. /// 0 = left to right /// 1 = right to left // Bit 5 - controls top/bottom transfer of pixels to /// the screen. /// 0 = bottom to top /// 1 = top to bottom /// /// In Combination bits 5/4, they would have these values /// 00 = bottom left /// 01 = bottom right /// 10 = top left /// 11 = top right /// // Bits 7-6 - Data storage interleaving flag. | // 00 = non-interleaved. | // 01 = two-way (even/odd) interleaving. | // 10 = four way interleaving. | // 11 = reserved. 
byte desc = fHeader.ImageDescriptor;
byte attrBits = (byte)(desc & 0x0F);
byte horizontalOrder = (byte)((desc & 0x10) >> 4);
byte verticalOrder = (byte)((desc & 0x20) >> 5);
byte interleave = (byte)((desc & 0xC0) >> 6);

// We can't deal with the compressed image types, so if we encounter
// any of them, we'll just return null.
if ((TargaImageType.TrueColor != fHeader.ImageType) &&
    (TargaImageType.Monochrome != fHeader.ImageType))
{
    return null;
}

PixmapOrientation pixmapOrientation = PixmapOrientation.BottomToTop;
if (0 == verticalOrder)
    pixmapOrientation = PixmapOrientation.BottomToTop;
else
    pixmapOrientation = PixmapOrientation.TopToBottom;

int bytesPerPixel = fHeader.PixelDepth / 8;

// Skip past the Image Identification field if there is one
byte[] ImageIdentification;
if (fHeader.IDLength > 0)
    ImageIdentification = reader.ReadBytes(fHeader.IDLength);

// Calculate image size based on bytes per row and height.
// (The original multiplied bytes per pixel by the height only,
// which would read just a fraction of the image data.)
int bytesPerRow = fHeader.Width * bytesPerPixel;
tgaSize = bytesPerRow * fHeader.Height;

byte[] imageData = reader.ReadBytes((int)tgaSize);

// Create the correct pixel array for the data
switch (bytesPerPixel)
{
    case 3:
        {
            PixelArray<BGRb> pixmap = new PixelArray<BGRb>(fHeader.Width, fHeader.Height, imageData);
            return pixmap;
        }

    case 4:
        {
            PixelArray<BGRAb> pixmap = new PixelArray<BGRAb>(fHeader.Width, fHeader.Height, imageData);
            return pixmap;
        }

    case 1:
        {
            PixelArray<Lumb> pixmap = new PixelArray<Lumb>(fHeader.Width, fHeader.Height, imageData);
            return pixmap;
        }
}

return null;
}
/// <summary>
/// This method is called through an Event dispatch. It is registered with
/// the DataChangedEvent of the viewer class. It should be raised whenever
/// a new set of drawing commands hits the viewer from the network.
/// </summary>
public virtual void PixBltLum24(PixelArray<Lumb> pixMap, int x, int y)
{
    //fBackingBuffer = pixMap;

    // Copy luminance back into the pixel buffer
    if (fBackingBuffer.Width != pixMap.Width || fBackingBuffer.Height != pixMap.Height)
        fBackingBuffer = new GDIDIBSection(pixMap.Width, pixMap.Height);

    PixmapTransfer.Copy(fBackingBuffer, pixMap);

    Rectangle srcRect = new Rectangle(0, 0, pixMap.Width, pixMap.Height);

    if (fAutoScale)
        DeviceContextClientArea.AlphaBlend(fBackingBuffer.DeviceContext, srcRect, ClientRectangle, 255);
    else
        DeviceContextClientArea.BitBlt(fBackingBuffer.DeviceContext, new Point(0, 0),
            new Rectangle(0, 0, pixMap.Width, pixMap.Height), TernaryRasterOps.SRCCOPY);
}
public NodeContainerFormat Convert(NodeContainerFormat source) { if (source == null) { throw new ArgumentNullException(nameof(source)); } NodeContainerFormat output = new NodeContainerFormat(); DataReader komaReader = new DataReader(Koma.Stream); int komaEntryNumber = (int)(komaReader.Stream.Length / KOMA_ENTRY_SIZE); for (int i = 0; i < komaEntryNumber; i++) { byte[] entry = komaReader.ReadBytes(KOMA_ENTRY_SIZE); // DTX NAME FROM ARM9 byte letterKomaName = entry[04]; byte numberKomaName = entry[05]; DataReader armReader = new DataReader(Arm.Stream); string dtxName = ""; armReader.Stream.RunInPosition( () => { dtxName = armReader.ReadString(); }, (KOMA_NAME_TABLE_OFFSET + letterKomaName * 4)); dtxName += "_" + numberKomaName; if (numberKomaName == 0) { dtxName += 0; } log.Debug("dtxName:" + dtxName); // DTX SHAPE byte indexGroupKshape = entry[08]; byte indexElementKshape = entry[09]; DataReader komaShapeReader = new DataReader(Komashape.Stream); long komaShapeOffset = 0; komaShapeReader.Stream.RunInPosition( () => komaShapeOffset = ((komaShapeReader.ReadInt32() + indexElementKshape) * 0x18) + 0x40, (indexGroupKshape * 4)); log.Debug("komaShapeOffset:" + komaShapeOffset); // DTX File Node dtx = Navigator.SearchNode <Node>(source.Root, Path.Combine("/" + Directory, "koma-" + dtxName + ".dtx")); DataReader dtxReader = new DataReader(dtx.Stream); int magicid = dtxReader.ReadInt32(); byte type = dtxReader.ReadByte(); byte type_alt = dtxReader.ReadByte(); short totalFramesNumber = dtxReader.ReadInt16(); short digPointer = dtxReader.ReadInt16(); short unknown = dtxReader.ReadInt16(); byte[] width = new byte[totalFramesNumber]; byte[] height = new byte[totalFramesNumber]; short[] frameIndex = new short[totalFramesNumber]; for (int j = 0; j < totalFramesNumber; j++) { width[j] = dtxReader.ReadByte(); height[j] = dtxReader.ReadByte(); frameIndex[j] = dtxReader.ReadInt16(); } BinaryFormat bfDIG = new BinaryFormat(dtx.Stream, (long)digPointer, (dtx.Stream.Length - (long)digPointer)); DIG dig = (DIG)ConvertFormat.With <Binary2DIG>(bfDIG); // Iterate KomaShape komaShapeReader.Stream.Position = komaShapeOffset; // Fichero Dig tiene 08 de ancho y 872 de alto // 08 * 872 / 2 = 3488 bytes byte[] dtxPixels = new byte[192 * 240 / 2]; // *** REVISAR int x = 0; int y = 0; log.Debug("==KOMASHAPE=="); // Iterate kshape for (int k = 0; k < 0x14; k++) { byte blockDTX = komaShapeReader.ReadByte(); log.Debug(k + " - Byte: " + blockDTX); if (blockDTX > 00) { blockDTX -= 1; // Empieza el primer bloque en el dtx long startIndex = frameIndex[blockDTX] * 0x20 + dig.PixelsStart + 32; log.Debug("startIndex:" + startIndex); int blockSize = width[blockDTX] * 8 * height[blockDTX] * 8; for (int l = 0; l < blockSize; l++) { int position = GetIndex(PixelEncoding.Lineal, x, y, 192, 240, new Size(8, 8)); dtxPixels[position] = dig.Pixels.GetData()[startIndex + l]; log.Debug(l + " - dtxPixels:" + dtxPixels[l]); x += 1; if (x >= 192) { x = 0; y += 1; } log.Debug("x: " + x); log.Debug("y: " + y); } } x += 48; if (x >= 192) { x = 0; y += 48; } log.Debug("x: " + x); log.Debug("y: " + y); } log.Debug("===="); // Generate new image PixelArray extractedDTX = new PixelArray { Width = 192, Height = 240, }; Palette palette = dig.Palette; extractedDTX.SetData(dtxPixels, PixelEncoding.Lineal, ColorFormat.Indexed_8bpp); var img = extractedDTX.CreateBitmap(palette, 0); var s = new MemoryStream(); img.Save(s, System.Drawing.Imaging.ImageFormat.Png); img.Save("test.png"); // Add to container var n = new Node(dtxName, new 
BinaryFormat(DataStreamFactory.FromStream(s))); output.Root.Add(n); } return(output); }
/// <summary> /// Compute the Forward DCT /// </summary> /// <param name="imgBuffer">f(x, y); source image</param> /// <param name="dctBuffer">F(u, v); DCT coefficients</param> /// <param name="fBasis">DCT basis functions</param> public PixelArray ForwardDCT(PixelArray imgBuffer) { int N = fBlockSize; // extract the dimensions of the image int width = imgBuffer.Width; int height = imgBuffer.Height; // Create the array of DCT coefficients PixelArray<RGBd> dctBuffer = new PixelArray<RGBd>(width, height); // intermediate result (an NxN block) PixelArray<RGBd> dctBlock = new PixelArray<RGBd>(N, N); // // for every NxN block of the image // for (int y = 0; y < height; y += N) { for (int x = 0; x < width; x += N) { // // do the first matrix multiplication // ([fBasis]' [imgBuffer]) // and store the results in dctBlock // for (int col = 0; col < N; ++col) { for (int row = 0; row < N; ++row) { RGBd result = new RGBd(0.0, 0.0, 0.0); for (int index = 0; index < N; ++index) { BGRb pixel = new BGRb(imgBuffer.GetPixelBytes(x + index, y + row)); double cosVal_transpose = fBasis[index, col]; result.red += cosVal_transpose * pixel.Red; result.green += cosVal_transpose * pixel.Green; result.blue += cosVal_transpose * pixel.Blue; } dctBlock.SetPixel(col, row, result); } } // // do the second matrix multiplication // ([dctBlock] [fBasis]) // and store the results in dctBuffer // for (int col2 = 0; col2 < N; ++col2) { for (int row2 = 0; row2 < N; ++row2) { RGBd result = new RGBd(0.0, 0.0, 0.0); for (int index = 0; index < N; ++index) { RGBd coeff = new RGBd(dctBlock.GetPixelBytes(col2, index)); double cosVal = fBasis[index, row2]; result.red += coeff.Red * cosVal; result.green += coeff.Green * cosVal; result.blue += coeff.Blue * cosVal; } dctBuffer.SetPixel(x + col2, y + row2, result); } } } } return dctBuffer; }
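Both DCT routines above perform the same block transform as two matrix multiplications per N×N block, following their own inline comments: the forward pass computes F = Bᵀ·f·B (first [fBasis]'·[imgBuffer] into dctBlock, then [dctBlock]·[fBasis]), and the inverse pass computes f = B·F·Bᵀ, where B is the fBasis matrix of cosine basis functions; the inverse then rounds each channel back to a byte with Math.Floor(value + 0.5).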
public override void PixBltBGRAb(PixelArray<BGRAb> pixBuff, int x, int y) { PixBltPixelArray(pixBuff, x, y); }
public override void PixBltLumb(PixelArray<Lumb> pixMap, int x, int y)
{
    MemoryStream ms = new MemoryStream();

    // 1. Run length encode the image to a memory stream
    NewTOAPIA.Imaging.TargaLuminanceRLE rlc = new NewTOAPIA.Imaging.TargaLuminanceRLE();
    rlc.Encode(pixMap, ms);

    // 2. Get the bytes from the stream
    byte[] imageBytes = ms.GetBuffer();
    int dataLength = (int)imageBytes.Length;

    // 3. Allocate a buffer chunk to accommodate the bytes, plus some more
    BufferChunk chunk = new BufferChunk(dataLength + 128);

    // 4. Put the command, destination, and data size into the buffer first
    chunk += (int)UICommands.PixBltLuminance;
    ChunkUtils.Pack(chunk, x, y);
    ChunkUtils.Pack(chunk, pixMap.Width, pixMap.Height);
    chunk += dataLength;

    // 5. Put the image bytes into the chunk
    chunk += imageBytes;

    // 6. Finally, send the packet
    SendCommand(chunk);
}
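The chunk layout written here mirrors what the ReceiveChunk decoder earlier in this collection expects for UICommands.PixBltLuminance: the command id, then x, y, width, height, the RLE data length, and finally the RLE-encoded luminance bytes, which the decoder feeds back through TargaLuminanceRLE.Decode into a PixelArray<Lumb> before calling PixBltLumb.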