/// <summary>
/// Gets the array if the slice is over an array; otherwise gets a pointer to the memory.
/// </summary>
/// <returns>true if it is a span over an array; otherwise false (if over a pointer).</returns>
/// <remarks>This method can be used for interop while we are waiting for proper pinning support.</remarks>
public unsafe bool TryGetArrayElseGetPointer(out ArraySegment<T> array, out void* pointer)
{
    var a = Object as T[];
    if (a == null)
    {
        // Not backed by a managed array: hand out the raw address of the native memory.
        array = new ArraySegment<T>();
        pointer = PtrUtils.ComputeAddress(Object, Offset).ToPointer();
        return false;
    }

    // Backed by a managed array: recover the element index from the byte offset into the array object.
    var offsetToData = SpanHelpers<T>.OffsetToArrayData;
    var index = (int)((Offset.ToUInt32() - offsetToData) / PtrUtils.SizeOf<T>());
    array = new ArraySegment<T>(a, index, Length);
    pointer = null;
    return true;
}
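
// Usage sketch (hypothetical caller, not part of the original source): passing a span to a
// native API that needs a raw pointer. NativeApi.Process(byte*, int) is an assumed entry
// point used only for illustration; the rest assumes this Span<T> implementation.
public static unsafe void PassToNative(Span<byte> span)
{
    if (span.TryGetArrayElseGetPointer(out ArraySegment<byte> segment, out void* pointer))
    {
        // Span wraps a managed array: pin it before handing the address to native code.
        fixed (byte* p = segment.Array)
        {
            NativeApi.Process(p + segment.Offset, segment.Count);
        }
    }
    else
    {
        // Span wraps native memory: the pointer is already stable, no pinning needed.
        NativeApi.Process((byte*)pointer, span.Length);
    }
}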
public void Set(ReadOnlySpan<T> values)
{
    if (Length < values.Length)
    {
        throw new ArgumentOutOfRangeException(nameof(values));
    }

    // For native memory, use bulk copy
    if (Object == null && values.Object == null)
    {
        var source = PtrUtils.ComputeAddress(values.Object, values.Offset);
        var destination = PtrUtils.ComputeAddress(Object, Offset);
        var byteCount = values.Length * PtrUtils.SizeOf<T>();
        PtrUtils.Copy(source, destination, byteCount);
        return;
    }

    for (int i = 0; i < values.Length; i++)
    {
        this[i] = values[i];
    }
}
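
// Usage sketch (hypothetical caller): copying values into a pre-sized destination span.
// Assumes this corefxlab-style Span<T>/ReadOnlySpan<T> type with array constructors and Slice.
byte[] destinationBuffer = new byte[16];
byte[] sourceBuffer = { 1, 2, 3, 4 };

var destination = new Span<byte>(destinationBuffer);
var source = new ReadOnlySpan<byte>(sourceBuffer);

destination.Set(source);             // copies 4 bytes into the start of the destination
// destination.Slice(8).Set(source); // or copy into an offset within the buffer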
/// <summary>
/// Copies the contents of this span into another. The destination
/// must be at least as big as the source, and may be bigger.
/// </summary>
/// <param name="destination">The span to copy items into.</param>
/// <returns>true if the copy succeeded; false if the destination is too small.</returns>
public bool TryCopyTo(Span<T> destination)
{
    if (Length > destination.Length)
    {
        return false;
    }

    // For native memory, use bulk copy
    if (Object == null && destination.Object == null)
    {
        var source = PtrUtils.ComputeAddress(Object, Offset);
        var destinationPtr = PtrUtils.ComputeAddress(destination.Object, destination.Offset);
        var byteCount = Length * PtrUtils.SizeOf<T>();
        PtrUtils.Copy(source, destinationPtr, byteCount);
        return true;
    }

    for (int i = 0; i < Length; i++)
    {
        destination[i] = this[i];
    }
    return true;
}
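
// Usage sketch (hypothetical caller): TryCopyTo reports failure instead of throwing, so the
// caller can size the destination itself and decide how to react. Assumes this Span<T> type.
public static byte[] CopyToArray(Span<byte> source)
{
    var buffer = new byte[source.Length];
    if (!source.TryCopyTo(new Span<byte>(buffer)))
    {
        // Cannot happen here because the buffer is sized to match, but a shared
        // scratch buffer could be too small and would need to be grown instead.
        throw new InvalidOperationException("Destination too small.");
    }
    return buffer;
}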
/// <summary>
/// Platform-independent fast memory comparison.
/// On x64 it is as fast as the memcmp in msvcrt.dll; on x86 it is up to two times faster.
/// </summary>
internal static bool MemCmp<[Primitive] T>(Span<T> first, Span<T> second)
    where T : struct
{
    unsafe
    {
        // Prevent the GC from moving the underlying memory while raw pointers are in use.
        var firstPinnedHandle = GCHandle.Alloc(first.Object, GCHandleType.Pinned);
        var secondPinnedHandle = GCHandle.Alloc(second.Object, GCHandleType.Pinned);
        try
        {
            byte* firstPointer = (byte*)PtrUtils.ComputeAddress(first.Object, first.Offset).ToPointer();
            byte* secondPointer = (byte*)PtrUtils.ComputeAddress(second.Object, second.Offset).ToPointer();

            int step = sizeof(void*) * 5;
            int totalBytesCount = first.Length * PtrUtils.SizeOf<T>();
            byte* firstPointerLimit = firstPointer + (totalBytesCount - step);

            if (totalBytesCount > step)
            {
                // Compare five pointer-sized words per iteration.
                while (firstPointer < firstPointerLimit)
                {
                    // IMPORTANT: to get the full benefit of loop unrolling on x86 we use break instead of return
                    if (*((void**)firstPointer + 0) != *((void**)secondPointer + 0)) { break; }
                    if (*((void**)firstPointer + 1) != *((void**)secondPointer + 1)) { break; }
                    if (*((void**)firstPointer + 2) != *((void**)secondPointer + 2)) { break; }
                    if (*((void**)firstPointer + 3) != *((void**)secondPointer + 3)) { break; }
                    if (*((void**)firstPointer + 4) != *((void**)secondPointer + 4)) { break; }
                    firstPointer += step;
                    secondPointer += step;
                }
                if (firstPointer < firstPointerLimit) // the loop above ended with a break: a difference was found
                {
                    return false;
                }
            }

            // Check the remaining bytes one by one.
            firstPointerLimit += step;
            while (firstPointer < firstPointerLimit)
            {
                if (*firstPointer != *secondPointer) { break; }
                ++firstPointer;
                ++secondPointer;
            }
            return firstPointer == firstPointerLimit;
        }
        finally
        {
            if (firstPinnedHandle.IsAllocated) { firstPinnedHandle.Free(); }
            if (secondPinnedHandle.IsAllocated) { secondPinnedHandle.Free(); }
        }
    }
}
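
// Usage sketch (hypothetical internal helper, placed in the same class as MemCmp): a
// SequenceEqual-style wrapper. The length check is the caller's responsibility, because
// MemCmp only compares first.Length * sizeof(T) bytes and never looks at second.Length.
internal static bool SequenceEqual<[Primitive] T>(Span<T> first, Span<T> second)
    where T : struct
{
    return first.Length == second.Length && MemCmp(first, second);
}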