/// <summary>
/// Function to convert a pixel coordinate into a texel coordinate.
/// </summary>
/// <param name="pixelCoordinates">The pixel coordinate to convert.</param>
/// <returns>The texel coordinates.</returns>
public DX.Vector3 ToTexel(DX.Point pixelCoordinates)
{
    float width = Texture.Width;
    float height = Texture.Height;

    // NOTE(review): Depth / (float)Depth always evaluates to 1.0f (and NaN when Depth == 0),
    // so the W component of the returned texel is constant. Compare the overload that takes a
    // depthSlice, which computes depth / DepthSliceCount — confirm the intended W coordinate.
    return (new DX.Vector3(pixelCoordinates.X / width, pixelCoordinates.Y / height, Depth / (float)Depth));
}
/// <summary>
/// Function to build glyphs from the font information.
/// </summary>
/// <param name="textures">The list of textures loaded.</param>
/// <param name="fontInfo">The font information to retrieve glyph data from.</param>
/// <returns>A new list of glyphs.</returns>
private IReadOnlyList<GorgonGlyph> GetGlyphs(IReadOnlyList<GorgonTexture2D> textures, BmFontInfo fontInfo)
{
    var result = new List<GorgonGlyph>();

    foreach (char character in fontInfo.Characters)
    {
        int advance = fontInfo.CharacterAdvances[character];

        // Whitespace glyphs carry no texture data; build an unlinked glyph and move on.
        if (char.IsWhiteSpace(character))
        {
            result.Add(CreateGlyph(character, advance));
            continue;
        }

        // Look up the texture region for this glyph before building it.
        int texIndex = fontInfo.GlyphTextureIndices[character];
        GorgonTexture2D glyphTexture = textures[texIndex];
        DX.Rectangle rect = fontInfo.GlyphRects[character];
        DX.Point glyphOffset = fontInfo.GlyphOffsets[character];

        GorgonGlyph newGlyph = CreateGlyph(character, advance);
        newGlyph.Offset = glyphOffset;
        newGlyph.UpdateTexture(glyphTexture, rect, DX.Rectangle.Empty, 0);

        result.Add(newGlyph);
    }

    return result;
}
/// <summary>
/// Initializes a new instance of the <see cref="GlyphInfo"/> class.
/// </summary>
/// <param name="glyphBitmap">The glyph bitmap.</param>
/// <param name="region">The region for the glyph.</param>
/// <param name="offset">The offset for the glyph.</param>
/// <param name="outlineRegion">The region for the glyph outline.</param>
/// <param name="outlineOffset">The offset for the glyph outline.</param>
public GlyphInfo(Bitmap glyphBitmap, DX.Rectangle region, DX.Point offset, DX.Rectangle outlineRegion, DX.Point outlineOffset)
{
    // Straight property capture; no validation is performed here.
    GlyphBitmap = glyphBitmap;
    Region = region;
    Offset = offset;
    OutlineRegion = outlineRegion;
    OutlineOffset = outlineOffset;
}
/// <summary>
/// Function to copy an image onto another image, using the supplied alignment.
/// </summary>
/// <param name="srcImage">The image to copy.</param>
/// <param name="destImage">The destination image.</param>
/// <param name="startMip">The starting mip map level to copy.</param>
/// <param name="startArrayOrDepth">The starting array index for 2D images, or depth slice for 3D images.</param>
/// <param name="alignment">The alignment of the image, relative to the source image.</param>
public void CopyTo(IGorgonImage srcImage, IGorgonImage destImage, int startMip, int startArrayOrDepth, Alignment alignment)
{
    // Mip/array range remaining in the destination from the requested start. For 3D images the
    // depth offset is applied per-slice inside the loop, not to the array count.
    int mipCount = destImage.MipCount - startMip;
    int arrayCount = destImage.ArrayCount - (destImage.ImageType == ImageType.Image3D ? 0 : startArrayOrDepth);

    // Only copy the range that exists in both images.
    int minMipCount = mipCount.Min(srcImage.MipCount);
    int minArrayCount = arrayCount.Min(srcImage.ArrayCount);

    var size = new DX.Size2(srcImage.Width, srcImage.Height);

    for (int array = 0; array < minArrayCount; ++array)
    {
        for (int mip = 0; mip < minMipCount; ++mip)
        {
            int destDepthCount = destImage.GetDepthCount(mip + startMip);
            int minDepth = destDepthCount.Min(srcImage.GetDepthCount(mip));

            for (int depth = 0; depth < minDepth; ++depth)
            {
                // 3D images offset by depth slice; 2D images offset by array index.
                int destOffset;
                if (destImage.ImageType == ImageType.Image3D)
                {
                    destOffset = depth + startArrayOrDepth;

                    // We're at the end of the destination buffer, skip the rest of the slices.
                    if (destOffset >= destDepthCount)
                    {
                        break;
                    }
                }
                else
                {
                    destOffset = array + startArrayOrDepth;
                }

                IGorgonImageBuffer srcBuffer = srcImage.Buffers[mip, srcImage.ImageType == ImageType.Image3D ? depth : array];
                IGorgonImageBuffer destBuffer = destImage.Buffers[mip + startMip, destOffset];

                // Clear the destination buffer before copying.
                destBuffer.Data.Fill(0);

                // Copy the overlapping region, anchored according to the requested alignment.
                int minWidth = destBuffer.Width.Min(srcBuffer.Width);
                int minHeight = destBuffer.Height.Min(srcBuffer.Height);
                var copyRegion = new DX.Rectangle(0, 0, minWidth, minHeight);

                DX.Point startLoc = GetAnchorStart(new DX.Size2(minWidth, minHeight), ref size, alignment);
                srcBuffer.CopyTo(destBuffer, copyRegion, startLoc.X, startLoc.Y);
            }
        }
    }
}
// Determines whether p1 and p2 lie on the same side of the line through a and b,
// by comparing the signs of the cross products of the edge with each point.
// Vector3D is faster (~1.25x) than using a struct Point3D...
private static bool SameSide(SharpDX.Point p1, SharpDX.Point p2, SharpDX.Point a, SharpDX.Point b)
{
    // Lift the 2D points into the z = 0 plane so Vector3 cross products can be used.
    var v1 = new Vector3(p1.X, p1.Y, 0);
    var v2 = new Vector3(p2.X, p2.Y, 0);
    var va = new Vector3(a.X, a.Y, 0);
    var vb = new Vector3(b.X, b.Y, 0);

    Vector3 edge = vb - va;
    Vector3 cross1 = Vector3.Cross(edge, v1 - va);
    Vector3 cross2 = Vector3.Cross(edge, v2 - va);

    // Same sign (or either point on the line) => same side.
    return Vector3.Dot(cross1, cross2) >= 0;
}
// Hit-tests the sphere under the mouse and updates the selection when it changes.
private void OnMouseMove(object sender, MouseEventArgs e)
{
    var scene = MainDXViewportView.DXScene;

    if (scene == null)
    {
        return;
    }

    var mousePos = e.GetPosition(ViewportBorder);

    int x = (int)mousePos.X;
    int y = (int)mousePos.Y;

    // Ignore move events that stay within the same integer pixel.
    if (x == _lastMousePosition.X && y == _lastMousePosition.Y)
    {
        return;
    }

    _lastMousePosition = new SharpDX.Point(x, y);

    var pickRay = scene.GetRayFromCamera(x, y);

    // Using OctTree significantly improve hit testing performance
    // Check this with uncommenting the following line (and commenting the use of OctTree):
    //var hitResult = dxScene.GetClosestHitObject(mouseRay);
    var hit = _octTree.HitTest(ref pickRay, new DXHitTestContext(scene));

    // Map the hit triangle back to the sphere index (-1 when nothing was hit).
    int sphereIndex = hit == null
        ? -1
        : (hit.TriangleIndex * 3) / _oneMeshTriangleIndicesCount;

    if (sphereIndex == _lastSelectedSphereIndex)
    {
        return;
    }

    SelectSphere(sphereIndex);
}
/// <summary>
/// Function to convert a 2D point value from pixel coordinates to texel space.
/// </summary>
/// <param name="pixelPoint">The pixel size to convert.</param>
/// <param name="mipLevel">[Optional] The mip level to use.</param>
/// <returns>A 2D vector containing the texel space coordinates.</returns>
/// <remarks>
/// <para>
/// If specified, the <paramref name="mipLevel"/> only applies to the <see cref="MipSlice"/> and <see cref="MipCount"/> for this view, it will be constrained if it falls outside of that range.
/// Because of this, the coordinates returned may not be the exact size of the texture bound to the view at mip level 0. If the <paramref name="mipLevel"/> is omitted, then the first mip level
/// for the underlying <see cref="Texture"/> is used.
/// </para>
/// </remarks>
public DX.Vector2 ToTexel(DX.Point pixelPoint, int? mipLevel = null)
{
    float texWidth;
    float texHeight;

    // Without a mip level, divide by the full texture dimensions; otherwise use the mip dimensions.
    if (mipLevel == null)
    {
        texWidth = Texture.Width;
        texHeight = Texture.Height;
    }
    else
    {
        texWidth = GetMipWidth(mipLevel.Value);
        texHeight = GetMipHeight(mipLevel.Value);
    }

    return new DX.Vector2(pixelPoint.X / texWidth, pixelPoint.Y / texHeight);
}
/// <summary>
/// Function to convert a pixel coordinate into a texel coordinate.
/// </summary>
/// <param name="pixelCoordinates">The pixel coordinate to convert.</param>
/// <param name="mipLevel">[Optional] The mip level to use.</param>
/// <returns>The texel coordinates.</returns>
/// <remarks>
/// <para>
/// If specified, the <paramref name="mipLevel"/> only applies to the <see cref="MipSlice"/> and <see cref="MipCount"/> for this view, it will be constrained if it falls outside of that range.
/// Because of this, the coordinates returned may not be the exact size of the texture bound to the view at mip level 0. If the <paramref name="mipLevel"/> is omitted, then the first mip level
/// for the underlying <see cref="Texture"/> is used.
/// </para>
/// </remarks>
public DX.Vector3 ToTexel(DX.Point pixelCoordinates, int? mipLevel = null)
{
    float width = Texture.Width;
    float height = Texture.Height;

    // NOTE(review): Depth / (float)Depth always evaluates to 1.0f (and NaN when Depth == 0),
    // so the W component of the returned texel is constant in both branches. Compare the
    // depthSlice overload, which computes depth / DepthSliceCount — confirm the intended value.
    if (mipLevel == null)
    {
        return (new DX.Vector3(pixelCoordinates.X / width, pixelCoordinates.Y / height, Depth / (float)Depth));
    }

    width = GetMipWidth(mipLevel.Value);
    height = GetMipHeight(mipLevel.Value);

    return (new DX.Vector3(pixelCoordinates.X / width, pixelCoordinates.Y / height, Depth / (float)Depth));
}
/// <summary>
/// Function to convert a pixel coordinate into a texel coordinate.
/// </summary>
/// <param name="pixelCoordinates">The pixel coordinate to convert.</param>
/// <param name="mipLevel">[Optional] The mip level to use.</param>
/// <param name="depthSlice">[Optional] The depth slice to use.</param>
/// <returns>The texel coordinates.</returns>
/// <remarks>
/// <para>
/// If specified, the <paramref name="mipLevel"/> only applies to the <see cref="MipSlice"/> for this view, it will be constrained if it falls outside of that range.
/// Because of this, the coordinates returned may not be the exact size of the texture bound to the view at mip level 0. If the <paramref name="mipLevel"/> is omitted, then the first mip level
/// for the underlying <see cref="Texture"/> is used.
/// </para>
/// <para>
/// If specified, the <paramref name="depthSlice"/> only applies to the <see cref="StartDepthSlice"/> and <see cref="DepthSliceCount"/> for this view, it will be constrained if it falls outside of that
/// range. Because of this, the coordinates returned may not be the exact size of the texture bound to the view at mip level 0. If the <paramref name="mipLevel"/> is omitted, then the first depth slice
/// is used.
/// </para>
/// </remarks>
public DX.Vector3 ToTexel(DX.Point pixelCoordinates, int? mipLevel = null, int? depthSlice = null)
{
    float width = Texture.Width;
    float height = Texture.Height;

    // Constrain the requested depth slice to [StartDepthSlice, StartDepthSlice + DepthSliceCount].
    // NOTE(review): after clamping, the value (which still includes the StartDepthSlice offset)
    // is divided by DepthSliceCount without subtracting StartDepthSlice — with a non-zero
    // StartDepthSlice the normalized W can exceed 1. Confirm that this is intentional.
    float depth = depthSlice ?? 0;
    depth = depth.Min(DepthSliceCount + StartDepthSlice).Max(StartDepthSlice);

    if (mipLevel == null)
    {
        return (new DX.Vector3(pixelCoordinates.X / width, pixelCoordinates.Y / height, depth / DepthSliceCount));
    }

    width = GetMipWidth(mipLevel.Value);
    height = GetMipHeight(mipLevel.Value);

    return (new DX.Vector3(pixelCoordinates.X / width, pixelCoordinates.Y / height, depth / DepthSliceCount));
}
/// <summary>
/// Handles mouse movement: while the left button is held it resizes the selection rectangle
/// and moves the dragged figure; otherwise it updates the cursor for the figure under the mouse.
/// </summary>
/// <param name="e">Mouse event data from the hosting Windows Forms control.</param>
public override void MouseMoveAction(System.Windows.Forms.MouseEventArgs e)
{
    var mouseLocation = new SharpDX.Point(e.Location.X, e.Location.Y);

    if (e.Button == System.Windows.Forms.MouseButtons.Left)
    {
        // Delta relative to StartDragPoint.
        // NOTE(review): StartDragPoint is only refreshed in the non-dragging branch below, so
        // these deltas grow relative to the original press point on every move event. Confirm
        // that IMoveable.Offset and the selectionRect growth expect cumulative rather than
        // incremental deltas.
        var dx = mouseLocation.X - this.StartDragPoint.X;
        var dy = mouseLocation.Y - this.StartDragPoint.Y;

        if (isRect)
        {
            selectionRect.Width += dx;
            selectionRect.Height += dy;
        }

        // Move the dragged figure, if it supports moving.
        IMoveable q = draggedFigure as IMoveable;
        if (q != null)
        {
            q.Offset(dx, dy);
        }

        UpdateMarkers();
    }
    else
    {
        // Not dragging: pick a cursor that reflects what is under the mouse.
        var figure = FindFigureByPoint(mouseLocation);
        if (figure is Marker)
        {
            this.ActiveCursor = System.Windows.Forms.Cursors.SizeAll;
        }
        else if (figure != null)
        {
            this.ActiveCursor = System.Windows.Forms.Cursors.Hand;
        }
        else
        {
            this.ActiveCursor = System.Windows.Forms.Cursors.Cross;
        }

        // Remember the position so a subsequent press drags from here.
        this.StartDragPoint = mouseLocation;
    }
}
/// <summary>
/// Determines whether point <paramref name="p"/> lies (approximately) on the line segment
/// from <paramref name="From"/> to <paramref name="To"/>.
/// </summary>
/// <param name="p">The point to test.</param>
/// <param name="From">The segment start point.</param>
/// <param name="To">The segment end point.</param>
/// <returns><b>true</b> when the point is on the segment; otherwise <b>false</b>.</returns>
public static bool IsPointInSegment(ref SharpDX.Point p, ref Location From, ref Location To)
{
    var p1 = From;
    var p2 = To;
    var dx = p2.X - p1.X;
    var dy = p2.Y - p1.Y;

    // NOTE(review): the axis-aligned branches assume p1 <= p2 component-wise (ordered
    // endpoints); a reversed segment always fails the range test — confirm callers.
    if (dx == 0)
    {
        // Vertical segment: only the Y range needs checking.
        return (p.Y >= p1.Y && p.Y <= p2.Y);
    }

    if (dy == 0)
    {
        // Horizontal segment: only the X range needs checking.
        // BUG FIX: the upper bound previously compared p.Y against p2.X; it must compare p.X.
        return (p.X >= p1.X && p.X <= p2.X);
    }
    else
    {
        // General case: the point is on the segment when its parametric position along X
        // matches its position along Y, within tolerance.
        var s1 = (p.X - p1.X) / dx;
        var s2 = (p.Y - p1.Y) / dy;
        return (System.Math.Abs(s1 - s2) <= 5E-2);
    }
}
/// <summary>
/// Called when a mouse button is released; releases mouse capture on the window.
/// </summary>
/// <param name="button">The button that was released.</param>
/// <param name="location">The mouse position.</param>
protected virtual void OnMouseUp(MouseButtons button, Point location) => _window.Capture = false;
/// <summary>
/// Called when a mouse button is pressed; captures the mouse on the window.
/// </summary>
/// <param name="button">The button that was pressed.</param>
/// <param name="location">The mouse position.</param>
protected virtual void OnMouseDown(MouseButtons button, Point location) => _window.Capture = true;
/// <summary>
/// Function to blit the texture to the specified render target.
/// </summary>
/// <param name="texture">The texture that will be blitted to the render target.</param>
/// <param name="destRect">The layout area to blit the texture into.</param>
/// <param name="sourceOffset">The offset within the source texture to start blitting from.</param>
/// <param name="color">The color used to tint the diffuse value of the texture.</param>
/// <param name="clip"><b>true</b> to clip the contents of the texture if the destination is larger/small than the size of the texture.</param>
/// <param name="blendState">The blending state to apply.</param>
/// <param name="samplerState">The sampler state to apply.</param>
/// <param name="pixelShader">The pixel shader used to override the default pixel shader.</param>
/// <param name="pixelShaderConstants">The pixel shader constant buffers to use.</param>
public void Blit(GorgonTexture2DView texture, DX.Rectangle destRect, DX.Point sourceOffset, GorgonColor color, bool clip, GorgonBlendState blendState, GorgonSamplerState samplerState, GorgonPixelShader pixelShader, GorgonConstantBuffers pixelShaderConstants)
{
    // Nothing to do when no render target is bound, or the tint is fully transparent.
    if ((_graphics.RenderTargets[0] == null) || (color.Alpha.EqualsEpsilon(0)))
    {
        return;
    }

    if (texture == null)
    {
        texture = _defaultTexture;
    }

    GorgonRenderTargetView currentView = _graphics.RenderTargets[0];

    // We need to update the projection/view if the size of the target changes.
    if ((_targetBounds == null) || (currentView.Width != _targetBounds.Value.Width) || (currentView.Height != _targetBounds.Value.Height))
    {
        _needsWvpUpdate = true;
    }

    UpdateProjection();

    // Set to default states if not provided.
    if (blendState == null)
    {
        blendState = GorgonBlendState.NoBlending;
    }

    if (pixelShader == null)
    {
        pixelShader = _pixelShader;
    }

    if (samplerState == null)
    {
        samplerState = GorgonSamplerState.Default;
    }

    if (pixelShaderConstants == null)
    {
        pixelShaderConstants = _emptyPsConstants;
    }

    GetDrawCall(texture, blendState, samplerState, pixelShader, pixelShaderConstants);

    // Calculate position on the texture.
    DX.Vector2 topLeft = texture.Texture.ToTexel(sourceOffset);
    // When clipping, the texel extent spans the destination rectangle; otherwise the whole texture.
    DX.Vector2 bottomRight = texture.Texture.ToTexel(clip ? new DX.Vector2(destRect.Width, destRect.Height) : new DX.Point(texture.Width, texture.Height));

    // When clipping, bottomRight is an extent; translate it into an absolute texel coordinate.
    if (clip)
    {
        DX.Vector2.Add(ref bottomRight, ref topLeft, out bottomRight);
    }

    // Update the vertices (triangle-strip order: TL, TR, BL, BR).
    _vertices[0] = new BltVertex
    {
        Position = new DX.Vector4(destRect.X, destRect.Y, 0, 1.0f),
        Uv = topLeft,
        Color = color
    };
    _vertices[1] = new BltVertex
    {
        Position = new DX.Vector4(destRect.Right, destRect.Y, 0, 1.0f),
        Uv = new DX.Vector2(bottomRight.X, topLeft.Y),
        Color = color
    };
    _vertices[2] = new BltVertex
    {
        Position = new DX.Vector4(destRect.X, destRect.Bottom, 0, 1.0f),
        Uv = new DX.Vector2(topLeft.X, bottomRight.Y),
        Color = color
    };
    _vertices[3] = new BltVertex
    {
        Position = new DX.Vector4(destRect.Right, destRect.Bottom, 0, 1.0f),
        Uv = new DX.Vector2(bottomRight.X, bottomRight.Y),
        Color = color
    };

    // Copy to the vertex buffer.
    _vertexBufferBindings[0].VertexBuffer.SetData(_vertices);

    _graphics.Submit(_drawCall);
}
/// <summary>
/// Converts a <see cref="SharpDX.Point"/> to a <see cref="System.Drawing.Point"/>.
/// </summary>
/// <param name="point">The SharpDX point to convert.</param>
/// <returns>An equivalent <see cref="System.Drawing.Point"/>.</returns>
public static System.Drawing.Point ToDrawingPoint(this SharpDX.Point point)
{
    return new System.Drawing.Point(point.X, point.Y);
}
/// <summary>
/// Returns the current cursor position as a <see cref="Vector2"/>.
/// </summary>
public static Vector2 GetCursorPositionVector()
{
    var cursor = GetCursorPosition();
    return new Vector2(cursor.X, cursor.Y);
}
/// <summary>
/// Function to extract the rectangle that defines the sprite on the texture, in pixels.
/// </summary>
/// <param name="column">The current column.</param>
/// <param name="row">The current row.</param>
/// <param name="offset">The offset of the grid from the upper left corner of the texture.</param>
/// <param name="cellSize">The size of the cell, in pixels.</param>
/// <returns>The pixel coordinates for the sprite.</returns>
private DX.Rectangle GetSpriteRect(int column, int row, DX.Point offset, DX.Size2 cellSize)
{
    // The cell's upper-left corner is the grid offset plus the cell's position within the grid.
    int left = offset.X + column * cellSize.Width;
    int top = offset.Y + row * cellSize.Height;

    return new DX.Rectangle(left, top, cellSize.Width, cellSize.Height);
}
/// <inheritdoc/>
public override void RenderAll()
{
    SurfaceViewData viewData;

    var regionToDraw = new SharpDX.Rectangle(0, 0, pixelWidth, pixelHeight);

    // Unlike other targets, we can only get the DXGI surface to render to
    // just before rendering.
    SharpDX.Mathematics.Interop.RawPoint rawPoint;
    using (var surface = surfaceImageSourceNative.BeginDraw(regionToDraw, out rawPoint))
    {
        position = rawPoint;

        // Cache DXGI surface in order to avoid recreate all render target view, depth stencil...etc.
        // Is it the right way to do it?
        // It seems that ISurfaceImageSourceNative.BeginDraw is returning 2 different DXGI surfaces
        if (!mapSurfaces.TryGetValue(surface.NativePointer, out viewData))
        {
            viewData = new SurfaceViewData();
            mapSurfaces.Add(surface.NativePointer, viewData);

            // Allocate a new renderTargetView if size is different
            // Cache the rendertarget dimensions in our helper class for convenient use.
            viewData.BackBuffer = Collect(surface.QueryInterface<SharpDX.Direct3D11.Texture2D>());
            {
                var desc = viewData.BackBuffer.Description;
                viewData.RenderTargetSize = new Size(desc.Width, desc.Height);
                viewData.RenderTargetView = Collect(new SharpDX.Direct3D11.RenderTargetView(DeviceManager.DeviceDirect3D, viewData.BackBuffer));
            }

            // Create a descriptor for the depth/stencil buffer.
            // Allocate a 2-D surface as the depth/stencil buffer.
            // Create a DepthStencil view on this surface to use on bind.
            // TODO: Recreate a DepthStencilBuffer is inefficient. We should only have one depth buffer. Shared depth buffer?
            using (var depthBuffer = new SharpDX.Direct3D11.Texture2D(DeviceManager.DeviceDirect3D, new SharpDX.Direct3D11.Texture2DDescription()
            {
                Format = SharpDX.DXGI.Format.D24_UNorm_S8_UInt,
                ArraySize = 1,
                MipLevels = 1,
                Width = (int)viewData.RenderTargetSize.Width,
                Height = (int)viewData.RenderTargetSize.Height,
                SampleDescription = new SharpDX.DXGI.SampleDescription(1, 0),
                BindFlags = SharpDX.Direct3D11.BindFlags.DepthStencil,
            }))
                // The texture can be released once the view holds a reference to it.
                viewData.DepthStencilView = Collect(new SharpDX.Direct3D11.DepthStencilView(DeviceManager.DeviceDirect3D, depthBuffer, new SharpDX.Direct3D11.DepthStencilViewDescription()
                {
                    Dimension = SharpDX.Direct3D11.DepthStencilViewDimension.Texture2D
                }));

            // Now we set up the Direct2D render target bitmap linked to the swapchain.
            // Whenever we render to this bitmap, it will be directly rendered to the
            // swapchain associated with the window.
            var bitmapProperties = new SharpDX.Direct2D1.BitmapProperties1(
                new SharpDX.Direct2D1.PixelFormat(SharpDX.DXGI.Format.B8G8R8A8_UNorm, SharpDX.Direct2D1.AlphaMode.Premultiplied),
                DeviceManager.Dpi,
                DeviceManager.Dpi,
                SharpDX.Direct2D1.BitmapOptions.Target | SharpDX.Direct2D1.BitmapOptions.CannotDraw);

            // Direct2D needs the dxgi version of the backbuffer surface pointer.
            // Get a D2D surface from the DXGI back buffer to use as the D2D render target.
            viewData.BitmapTarget = Collect(new SharpDX.Direct2D1.Bitmap1(DeviceManager.ContextDirect2D, surface, bitmapProperties));

            // Create a viewport descriptor of the full window size.
            // The viewport is offset by the position BeginDraw returned and shrunk by the same amount.
            viewData.Viewport = new SharpDX.ViewportF(position.X, position.Y, (float)viewData.RenderTargetSize.Width - position.X, (float)viewData.RenderTargetSize.Height - position.Y, 0.0f, 1.0f);
        }

        // Publish the cached resources for this frame.
        backBuffer = viewData.BackBuffer;
        renderTargetView = viewData.RenderTargetView;
        depthStencilView = viewData.DepthStencilView;
        RenderTargetBounds = new Rect(viewData.Viewport.X, viewData.Viewport.Y, viewData.Viewport.Width, viewData.Viewport.Height);
        bitmapTarget = viewData.BitmapTarget;

        DeviceManager.ContextDirect2D.Target = viewData.BitmapTarget;

        // Set the current viewport using the descriptor.
        DeviceManager.ContextDirect3D.Rasterizer.SetViewport(viewData.Viewport);

        // Perform the actual rendering of this target
        base.RenderAll();
    }

    surfaceImageSourceNative.EndDraw();
}
/// <summary>
/// Function to expand an image width, height, and/or depth.
/// </summary>
/// <param name="baseImage">The image to expand.</param>
/// <param name="newWidth">The new width of the image.</param>
/// <param name="newHeight">The new height of the image.</param>
/// <param name="newDepth">The new depth of the image.</param>
/// <param name="anchor">[Optional] The anchor point for placing the image data after the image is expanded.</param>
/// <returns>The expanded image.</returns>
/// <remarks>
/// <para>
/// This will expand the size of an image, but not stretch the actual image data. This will leave a padding around the original image area filled with transparent pixels.
/// </para>
/// <para>
/// The image data can be repositioned in the new image by specifying an <paramref name="anchor"/> point.
/// </para>
/// <para>
/// If the new size of the image is smaller than that of the <paramref name="baseImage"/>, then the new size is constrained to the old size. Cropping is not supported by this method.
/// </para>
/// <para>
/// If a user wishes to resize the image, then call the <see cref="Resize"/> method, of if they wish to crop an image, use the <see cref="Crop"/> method.
/// </para>
/// </remarks>
public static IGorgonImage Expand(this IGorgonImage baseImage, int newWidth, int newHeight, int newDepth, ImageExpandAnchor anchor = ImageExpandAnchor.UpperLeft)
{
    IGorgonImage workingImage = null;
    WicUtilities wic = null;

    try
    {
        // Constrain to the correct sizes.
        newWidth = newWidth.Max(baseImage.Width);
        newHeight = newHeight.Max(baseImage.Height);
        newDepth = newDepth.Max(baseImage.Depth);

        // Only use the appropriate dimensions.
        switch (baseImage.ImageType)
        {
            case ImageType.Image1D:
                newHeight = baseImage.Height;
                break;
            case ImageType.Image2D:
            case ImageType.ImageCube:
                newDepth = baseImage.Depth;
                break;
        }

        // We don't shrink with this method; use the Crop method for that.
        if ((newWidth <= baseImage.Width) && (newHeight <= baseImage.Height) && (newDepth <= baseImage.Depth))
        {
            return (baseImage);
        }

        wic = new WicUtilities();

        // NOTE(review): this GorgonImage is immediately overwritten by the wic.Resize result
        // below and never disposed — presumably a leak of whatever it allocates; confirm.
        workingImage = new GorgonImage(new GorgonImageInfo(baseImage)
        {
            Width = newWidth,
            Height = newHeight,
            Depth = newDepth
        });

        // Compute where the original image data sits inside the expanded canvas.
        DX.Point position = DX.Point.Zero;

        switch (anchor)
        {
            case ImageExpandAnchor.UpperMiddle:
                position = new DX.Point((newWidth / 2) - (baseImage.Width / 2), 0);
                break;
            case ImageExpandAnchor.UpperRight:
                position = new DX.Point(newWidth - baseImage.Width, 0);
                break;
            case ImageExpandAnchor.MiddleLeft:
                position = new DX.Point(0, (newHeight / 2) - (baseImage.Height / 2));
                break;
            case ImageExpandAnchor.Center:
                position = new DX.Point((newWidth / 2) - (baseImage.Width / 2), (newHeight / 2) - (baseImage.Height / 2));
                break;
            case ImageExpandAnchor.MiddleRight:
                position = new DX.Point(newWidth - baseImage.Width, (newHeight / 2) - (baseImage.Height / 2));
                break;
            case ImageExpandAnchor.BottomLeft:
                position = new DX.Point(0, newHeight - baseImage.Height);
                break;
            case ImageExpandAnchor.BottomMiddle:
                position = new DX.Point((newWidth / 2) - (baseImage.Width / 2), newHeight - baseImage.Height);
                break;
            case ImageExpandAnchor.BottomRight:
                position = new DX.Point(newWidth - baseImage.Width, newHeight - baseImage.Height);
                break;
        }

        // The expanded image may not support as many mip levels as the original requested.
        int calcMipLevels = GorgonImage.CalculateMaxMipCount(newWidth, newHeight, newDepth).Min(baseImage.MipCount);
        workingImage = wic.Resize(baseImage, position.X, position.Y, newWidth, newHeight, newDepth, calcMipLevels, ImageFilter.Point, ResizeMode.Expand);

        // Send the data over to the new image (baseImage is updated in place and returned).
        workingImage.CopyTo(baseImage);

        return (baseImage);
    }
    finally
    {
        workingImage?.Dispose();
        wic?.Dispose();
    }
}
/// <summary>
/// Called when the mouse moves. The base implementation does nothing; override to handle movement.
/// </summary>
/// <param name="button">The mouse button state.</param>
/// <param name="location">The mouse position.</param>
protected virtual void OnMouseMove(MouseButtons button, Point location)
{
}
// Returns true when p is inside (or on the edge of) triangle abc.
// The point is inside iff it lies on the same side as the opposite vertex for all three edges.
public static bool PointInTriangle(SharpDX.Point p, SharpDX.Point a, SharpDX.Point b, SharpDX.Point c)
{
    return SameSide(p, a, b, c)
           && SameSide(p, b, c, a)
           && SameSide(p, c, a, b);
}
/// <summary>
/// Function to crop an image.
/// </summary>
/// <param name="cropImage">The image to crop.</param>
/// <param name="destSize">The new size of the image.</param>
/// <param name="alignment">The location to start cropping from.</param>
public void CropTo(IGorgonImage cropImage, DX.Size2 destSize, Alignment alignment)
{
    // Anchor the crop window within the current image according to the requested alignment.
    var currentSize = new DX.Size2(cropImage.Width, cropImage.Height);
    DX.Point startLoc = GetAnchorStart(currentSize, ref destSize, alignment);

    cropImage.Crop(new DX.Rectangle(startLoc.X, startLoc.Y, destSize.Width, destSize.Height), cropImage.Depth);
}
/// <inheritdoc/>
public override void RenderAll()
{
    SurfaceViewData viewData;

    var regionToDraw = new SharpDX.Rectangle(0, 0, pixelWidth, pixelHeight);

    // Unlike other targets, we can only get the DXGI surface to render to
    // just before rendering.
    SharpDX.Mathematics.Interop.RawPoint rawPoint;
    using (var surface = surfaceImageSourceNative.BeginDraw(regionToDraw, out rawPoint))
    {
        position = rawPoint;

        // Cache DXGI surface in order to avoid recreate all render target view, depth stencil...etc.
        // Is it the right way to do it?
        // It seems that ISurfaceImageSourceNative.BeginDraw is returning 2 different DXGI surfaces
        if (!mapSurfaces.TryGetValue(surface.NativePointer, out viewData))
        {
            viewData = new SurfaceViewData();
            mapSurfaces.Add(surface.NativePointer, viewData);

            // Allocate a new renderTargetView if size is different
            // Cache the rendertarget dimensions in our helper class for convenient use.
            viewData.BackBuffer = Collect(surface.QueryInterface<SharpDX.Direct3D11.Texture2D>());
            {
                var desc = viewData.BackBuffer.Description;
                viewData.RenderTargetSize = new Size(desc.Width, desc.Height);
                viewData.RenderTargetView = Collect(new SharpDX.Direct3D11.RenderTargetView(DeviceManager.DeviceDirect3D, viewData.BackBuffer));
            }

            // Create a descriptor for the depth/stencil buffer.
            // Allocate a 2-D surface as the depth/stencil buffer.
            // Create a DepthStencil view on this surface to use on bind.
            // TODO: Recreate a DepthStencilBuffer is inefficient. We should only have one depth buffer. Shared depth buffer?
            using (var depthBuffer = new SharpDX.Direct3D11.Texture2D(DeviceManager.DeviceDirect3D, new SharpDX.Direct3D11.Texture2DDescription()
            {
                Format = SharpDX.DXGI.Format.D24_UNorm_S8_UInt,
                ArraySize = 1,
                MipLevels = 1,
                Width = (int)viewData.RenderTargetSize.Width,
                Height = (int)viewData.RenderTargetSize.Height,
                SampleDescription = new SharpDX.DXGI.SampleDescription(1, 0),
                BindFlags = SharpDX.Direct3D11.BindFlags.DepthStencil,
            }))
                // The texture can be released once the view holds a reference to it.
                viewData.DepthStencilView = Collect(new SharpDX.Direct3D11.DepthStencilView(DeviceManager.DeviceDirect3D, depthBuffer, new SharpDX.Direct3D11.DepthStencilViewDescription()
                {
                    Dimension = SharpDX.Direct3D11.DepthStencilViewDimension.Texture2D
                }));

            // Now we set up the Direct2D render target bitmap linked to the swapchain.
            // Whenever we render to this bitmap, it will be directly rendered to the
            // swapchain associated with the window.
            var bitmapProperties = new SharpDX.Direct2D1.BitmapProperties1(
                new SharpDX.Direct2D1.PixelFormat(SharpDX.DXGI.Format.B8G8R8A8_UNorm, SharpDX.Direct2D1.AlphaMode.Premultiplied),
                DeviceManager.Dpi,
                DeviceManager.Dpi,
                SharpDX.Direct2D1.BitmapOptions.Target | SharpDX.Direct2D1.BitmapOptions.CannotDraw);

            // Direct2D needs the dxgi version of the backbuffer surface pointer.
            // Get a D2D surface from the DXGI back buffer to use as the D2D render target.
            viewData.BitmapTarget = Collect(new SharpDX.Direct2D1.Bitmap1(DeviceManager.ContextDirect2D, surface, bitmapProperties));

            // Create a viewport descriptor of the full window size.
            // The viewport is offset by the position BeginDraw returned and shrunk by the same amount.
            viewData.Viewport = new SharpDX.ViewportF(position.X, position.Y, (float)viewData.RenderTargetSize.Width - position.X, (float)viewData.RenderTargetSize.Height - position.Y, 0.0f, 1.0f);
        }

        // Publish the cached resources for this frame.
        backBuffer = viewData.BackBuffer;
        renderTargetView = viewData.RenderTargetView;
        depthStencilView = viewData.DepthStencilView;
        RenderTargetBounds = new Rect(viewData.Viewport.X, viewData.Viewport.Y, viewData.Viewport.Width, viewData.Viewport.Height);
        bitmapTarget = viewData.BitmapTarget;

        DeviceManager.ContextDirect2D.Target = viewData.BitmapTarget;

        // Set the current viewport using the descriptor.
        DeviceManager.ContextDirect3D.Rasterizer.SetViewport(viewData.Viewport);

        // Perform the actual rendering of this target
        base.RenderAll();
    }

    surfaceImageSourceNative.EndDraw();
}
/// <summary>
/// Hit testing for this figure: always reports the point as outside.
/// </summary>
/// <param name="p">The point to test.</param>
/// <returns>Always <b>false</b>.</returns>
public override bool IsPointInside(ref SharpDX.Point p) => false;