internal static void ArrayViewAlignmentKernel<T>(
    Index1 index,
    ArrayView<T> data,
    ArrayView<long> prefixLength,
    ArrayView<long> mainLength,
    int alignmentInBytes,
    T element)
    where T : unmanaged
{
    // Split the view into an (unaligned) prefix part and an aligned main part.
    var (prefixView, mainView) = data.AlignTo(alignmentInBytes);

    // Record both partition lengths so the host can verify the split.
    prefixLength[index] = prefixView.Length;
    mainLength[index] = mainView.Length;

    // Only threads that fall inside the prefix write into it.
    if (index < prefixView.Length)
    {
        prefixView[index] = element;
    }

    // The aligned main part is expected to be non-empty here.
    Trace.Assert(mainView.Length > 0);
    mainView[index] = element;
}
/// <summary>
/// A simple 1D kernel using basic atomic functions.
/// The second parameter (<paramref name="dataView"/>) represents the target
/// view for all atomic operations.
/// </summary>
/// <param name="index">The current thread index.</param>
/// <param name="dataView">The view pointing to our memory buffer.</param>
/// <param name="constant">A uniform constant.</param>
static void AtomicOperationKernel(
    Index1 index,               // The global thread index (1D in this case)
    ArrayView <int> dataView,   // A view to a chunk of memory (1D in this case)
    int constant)               // A sample uniform constant
{
    // dataView[0] += constant
    Atomic.Add(ref dataView[0], constant);

    // dataView[1] = Max(dataView[1], constant)
    Atomic.Max(ref dataView[1], constant);

    // dataView[2] = Min(dataView[2], constant)
    Atomic.Min(ref dataView[2], constant);

    // dataView[3] = dataView[3] & constant (bitwise AND)
    Atomic.And(ref dataView[3], constant);

    // dataView[4] = dataView[4] | constant (bitwise OR)
    Atomic.Or(ref dataView[4], constant);

    // dataView[5] = dataView[5] ^ constant (bitwise XOR)
    Atomic.Xor(ref dataView[5], constant);
}
//</Snippet1>
public static void MakeMemberAccess1()
{
    //<Snippet1>
    // Add the following directive to your file
    // using Microsoft.Scripting.Ast;

    var MyInstance = new Index1();
    MyInstance.X = 5;

    // This expression represents accessing a non-indexed member, for either
    // assigning or reading its value.
    MemberExpression MyMakeMemberAccess = Expression.MakeMemberAccess(
        Expression.Constant(MyInstance),
        typeof(Index1).GetMember("X")[0]
    );

    // The end result should be 5:
    Console.WriteLine(Expression.Lambda<Func<int>>(MyMakeMemberAccess).Compile().Invoke());
    //</Snippet1>

    // Validate sample
    if (Expression.Lambda<Func<int>>(MyMakeMemberAccess).Compile().Invoke() != 5)
        throw new Exception();
}
/// <summary cref="MemoryBuffer{T, TIndex}.CopyToView(
/// AcceleratorStream, ArrayView{T}, Index1)"/>
protected internal unsafe override void CopyToView(
    AcceleratorStream stream,
    ArrayView <T> target,
    Index1 sourceOffset)
{
    // Bind this accelerator's context for the duration of the copy.
    var binding = Accelerator.BindScoped();

    var targetBuffer = target.Source;
    var sourceAddress = new IntPtr(ComputeEffectiveAddress(sourceOffset));
    var targetAddress = new IntPtr(target.LoadEffectiveAddress());
    switch (targetBuffer.AcceleratorType)
    {
        case AcceleratorType.CPU:
            // Device -> host copy.
            CudaException.ThrowIfFailed(CudaAPI.Current.MemcpyDeviceToHost(
                targetAddress,
                sourceAddress,
                new IntPtr(target.LengthInBytes),
                stream));
            break;
        case AcceleratorType.Cuda:
            // Device -> device copy.
            CudaException.ThrowIfFailed(CudaAPI.Current.MemcpyDeviceToDevice(
                targetAddress,
                sourceAddress,
                new IntPtr(target.LengthInBytes),
                stream));
            break;
        default:
            // Copies to other accelerator types are not supported.
            throw new NotSupportedException(
                RuntimeErrorMessages.NotSupportedTargetAccelerator);
    }

    binding.Recover();
}
/// <summary cref="MemoryBuffer{T, TIndex}.CopyFromView(
/// AcceleratorStream, ArrayView{T}, Index1)"/>
protected internal unsafe override void CopyFromView(
    AcceleratorStream stream,
    ArrayView <T> source,
    Index1 targetOffset)
{
    var clStream = (CLStream)stream;
    switch (source.AcceleratorType)
    {
        case AcceleratorType.CPU:
            // Host -> device write into this buffer.
            // NOTE(review): 'false' is presumably the non-blocking flag —
            // confirm against the CLAPI.WriteBuffer signature.
            CLException.ThrowIfFailed(
                CLAPI.WriteBuffer(
                    clStream.CommandQueue,
                    NativePtr,
                    false,
                    new IntPtr(targetOffset * ElementSize),
                    new IntPtr(source.LengthInBytes),
                    new IntPtr(source.LoadEffectiveAddress())));
            break;
        case AcceleratorType.OpenCL:
            // Device -> device copy between OpenCL buffers.
            CLException.ThrowIfFailed(
                CLAPI.CopyBuffer(
                    clStream.CommandQueue,
                    source.Source.NativePtr,
                    NativePtr,
                    new IntPtr(source.Index * ElementSize),
                    new IntPtr(targetOffset * ElementSize),
                    new IntPtr(source.LengthInBytes)));
            break;
        default:
            // Copies from other accelerator types are not supported.
            throw new NotSupportedException(
                RuntimeErrorMessages.NotSupportedTargetAccelerator);
    }
}
//</Snippet1>
public static void MakeIndex1()
{
    //<Snippet1>
    // Add the following directive to your file
    // using Microsoft.Scripting.Ast;

    var MyInstance = new Index1();
    MyInstance[0] = 5;

    // This expression represents accessing an indexed member, such as
    // an array or an indexed property.
    IndexExpression MyMakeIndex = Expression.MakeIndex(
        Expression.Constant(MyInstance),
        typeof(Index1).GetProperty("Item"),
        new Expression[] {Expression.Constant(1)}
    );

    // The end result should be 6:
    // NOTE(review): the sample sets index 0 to 5 but reads index 1 and expects
    // 6 — this relies on Index1's indexer semantics; verify against Index1.
    Console.WriteLine(Expression.Lambda<Func<int>>(MyMakeIndex).Compile().Invoke());
    //</Snippet1>

    // Validate sample
    if (Expression.Lambda<Func<int>>(MyMakeIndex).Compile().Invoke() != 6)
        throw new Exception();
}
internal static void ArraySimpleDivergentKernel<T, TArraySize>(
    Index1 index,
    ArrayView<T> data,
    T c,
    int localIndex)
    where T : unmanaged
    where TArraySize : unmanaged, ILength
{
    TArraySize arraySize = default;
    if (index <= 10)
    {
        // Uniform path: store the constant directly.
        data[index] = c;
    }
    else
    {
        // Divergent path: fill a local array with the constant and
        // read it back through localIndex.
        var buffer = new T[arraySize.Length];
        for (int slot = 0; slot < arraySize.Length; ++slot)
        {
            buffer[slot] = c;
        }
        data[index] = buffer[localIndex];
    }
}
internal static void ArrayViewExtentKernel(
    Index1 index,
    ArrayView<int> data)
{
    // Each thread records the X component of the 32-bit extent.
    var extentX = data.IntExtent.X;
    data[index] = extentX;
}
internal static void ArrayViewLongExtentKernel(
    Index1 index,
    ArrayView<long> data)
{
    // Each thread records the X component of the 64-bit extent.
    var extentX = data.Extent.X;
    data[index] = extentX;
}
/// <summary>
/// Copies the requested contents into a new byte array.
/// </summary>
/// <param name="stream">The used accelerator stream.</param>
/// <param name="byteOffset">The offset in bytes.</param>
/// <param name="byteExtent">The extent in bytes.</param>
/// <returns>A new array segment holding the requested contents.</returns>
protected internal abstract ArraySegment <byte> GetAsRawArray(
    AcceleratorStream stream,
    Index1 byteOffset,
    Index1 byteExtent);
internal static void ArrayViewValidKernel(Index1 index, ArrayView<int> data)
{
    // A default-constructed view is the canonical "invalid" view.
    ArrayView<int> invalid = default;

    // Sum the two indicator flags: one for the passed-in view being valid,
    // one for the default view being invalid.
    int dataIsValid = data.IsValid ? 1 : 0;
    int defaultIsInvalid = !invalid.IsValid ? 1 : 0;
    data[index] = dataIsValid + defaultIsInvalid;
}
// Verifies index staleness transitions when a referenced document changes:
// fresh -> still fresh after stopping indexing -> stale after the referenced
// document is modified -> fresh again once indexing resumes and catches up.
public void StalenessShouldWorkProperlyWhenReferenceIsChanged()
{
    using (var store = GetDocumentStore())
    {
        // Deploy both indexes before inserting any data.
        var index1 = new Index1();
        index1.Execute(store);

        var index2 = new Index2();
        index2.Execute(store);

        using (var session = store.OpenSession())
        {
            var address = new Address { City = "New York" };
            session.Store(address);
            session.Store(new User { AddressId = address.Id });
            session.SaveChanges();
        }

        Indexes.WaitForIndexing(store);

        // Both indexes have caught up -> not stale.
        var stats = store.Maintenance.Send(new GetIndexStatisticsOperation(index1.IndexName));
        Assert.False(stats.IsStale);

        stats = store.Maintenance.Send(new GetIndexStatisticsOperation(index2.IndexName));
        Assert.False(stats.IsStale);

        store.Maintenance.Send(new StopIndexingOperation());

        // Stopping indexing alone must not mark the indexes as stale.
        stats = store.Maintenance.Send(new GetIndexStatisticsOperation(index1.IndexName));
        Assert.False(stats.IsStale);

        stats = store.Maintenance.Send(new GetIndexStatisticsOperation(index2.IndexName));
        Assert.False(stats.IsStale);

        using (var session = store.OpenSession())
        {
            // Modify the referenced document while indexing is stopped.
            var address = session.Load <Address>("addresses/1-A");
            address.City = "Barcelona";
            session.SaveChanges();
        }

        // The reference change makes both indexes stale.
        stats = store.Maintenance.Send(new GetIndexStatisticsOperation(index1.IndexName));
        Assert.True(stats.IsStale);

        stats = store.Maintenance.Send(new GetIndexStatisticsOperation(index2.IndexName));
        Assert.True(stats.IsStale);

        store.Maintenance.Send(new StartIndexingOperation());

        Indexes.WaitForIndexing(store);

        // After indexing resumes and catches up, staleness clears again.
        stats = store.Maintenance.Send(new GetIndexStatisticsOperation(index1.IndexName));
        Assert.False(stats.IsStale);

        stats = store.Maintenance.Send(new GetIndexStatisticsOperation(index2.IndexName));
        Assert.False(stats.IsStale);
    }
}
/// <summary>
/// Returns the value stored at the given index.
/// </summary>
/// <typeparam name="T">The element type.</typeparam>
/// <param name="array">The source array.</param>
/// <param name="index">The element index.</param>
/// <returns>The value at the given index.</returns>
public static T GetValue <T>(this T[] array, Index1 index)
{
    return array[index];
}
public static (Index1, Index1) ComputeGridStrideLoopExtent( this Accelerator accelerator, Index1 numDataElements) =>
/// <summary>
/// Brute-force kernel that hashes batches of candidate strings, reusing
/// precalculated per-prefix hash seeds between successive candidates.
/// Each thread owns one row of <paramref name="charsetIndexes"/> and walks
/// its candidate space until <paramref name="nameCount"/> is exhausted or a
/// name matching both lookup hashes is found.
/// </summary>
public static void HashStringsBatchOptimized(
    Index1 index,
    ArrayView <byte> charset,          // 1D array holding the charset bytes
    ArrayView <uint> cryptTable,       // 1D array crypt table used for hash computation
    ArrayView2D <int> charsetIndexes,  // 2D array containing the char indexes of one batch string seed (one string per line, hashes will be computed starting from this string)
    ArrayView <byte> suffixBytes,      // 1D array holding the indexes of the suffix chars
    uint hashALookup,                  // The hash A that we are looking for
    uint hashBLookup,                  // The hash B that we are looking for
    uint prefixSeed1a,                 // Pre-computed hash A seed 1 for the string prefix
    uint prefixSeed2a,                 // Pre-computed hash A seed 2 for the string prefix
    uint prefixSeed1b,                 // Pre-computed hash B seed 1 for the string prefix
    uint prefixSeed2b,                 // Pre-computed hash B seed 2 for the string prefix
    bool firstBatch,
    int nameCount,                     // Name count limit (used as return condition)
    int batchCharCount,                // MAX = 8 // Number of generated chars in the batch
    ArrayView <int> foundNameCharsetIndexes // 1D array containing the found name (if found)
)
{
    // Brute force increment variables
    int generatedCharIndex = 0;

    // Hash variables
    uint ch;            // Current char of the processed string
    uint s1, s2;        // Hash seeds
    int typeA = 0x100;  // Hash type A
    int typeB = 0x200;  // Hash type B

    // A leading zero byte means "no suffix".
    bool suffix = true;
    if (suffixBytes[0] == 0)
    {
        suffix = false;
    }

    // Hash precalculated seeds (after prefix)
    uint[] precalcSeeds1 = new uint[8];
    uint[] precalcSeeds2 = new uint[8];
    precalcSeeds1[0] = prefixSeed1a;
    precalcSeeds2[0] = prefixSeed2a;
    int precalcSeedIndex = 0;

    // Brute force increment preparation
    // Increase name count to !numChars-1 for first batch first name seed
    if (firstBatch && index == 0)
    {
        nameCount = -1;
        for (int i = 1; i <= batchCharCount; ++i)
        {
            int temp = 1;
            for (int j = 0; j < i; j++)
            {
                temp *= (int)charset.Length;
            }
            nameCount += temp;
            // NOTE(review): the last length term is added twice on purpose?
            // Both batch kernels share this pattern — confirm intent.
            if (i == batchCharCount)
            {
                temp = 1;
                for (int j = 0; j < i; j++)
                {
                    temp *= (int)charset.Length;
                }
                nameCount += temp;
            }
        }
    }

    // Find the position of the last generated char (-1 marks end of string)
    for (int i = 0; i < charsetIndexes.Height; ++i)
    {
        Index2 idx = new Index2(index.X, i);
        int charIndex = charsetIndexes[idx];
        if (charsetIndexes[idx] == -1)
        {
            generatedCharIndex = i - 1;
            break;
        }
    }

    // For each name compute hash
    while (nameCount != 0)
    {
        // Subsequent names: resume from the deepest still-valid precalc seed
        s1 = precalcSeeds1[precalcSeedIndex];
        s2 = precalcSeeds2[precalcSeedIndex];

        // Hash calculation (hash A)
        for (int i = precalcSeedIndex; i < charsetIndexes.Height; ++i)
        {
            // Retrieve the current char of the string
            Index1 charsetIdx = charsetIndexes[new Index2(index.X, i)];
            if (charsetIdx == -1) // break if end of the string is reached
            {
                break;
            }
            ch = charset[charsetIdx];

            // Hash calculation
            s1 = cryptTable[typeA + ch] ^ (s1 + s2);
            s2 = ch + s1 + s2 + (s2 << 5) + 3;

            // Store precalc seeds except if we are at the last character of the string
            // (then it's not needed because this char changes constantly)
            if (i < generatedCharIndex)
            {
                precalcSeeds1[precalcSeedIndex + 1] = s1;
                precalcSeeds2[precalcSeedIndex + 1] = s2;
                precalcSeedIndex++;
            }
        }

        // Process suffix
        if (suffix)
        {
            for (int i = 0; i < suffixBytes.Length; ++i)
            {
                // Retrieve current suffix char
                ch = suffixBytes[i];

                // Hash calculation
                s1 = cryptTable[typeA + ch] ^ (s1 + s2);
                s2 = ch + s1 + s2 + (s2 << 5) + 3;
            }
        }

        // Check if it matches the hash that we are looking for
        // No precalculation because this is only executed on matches and collisions
        if (s1 == hashALookup)
        {
            // Recompute the full hash B from the prefix seeds
            s1 = prefixSeed1b;
            s2 = prefixSeed2b;
            for (int i = 0; i < charsetIndexes.Height; ++i)
            {
                // Retrieve the current char of the string
                Index1 charsetIdx = charsetIndexes[new Index2(index.X, i)];
                if (charsetIdx == -1) // break if end of the string is reached
                {
                    break;
                }
                ch = charset[charsetIdx];

                // Hash calculation
                s1 = cryptTable[typeB + ch] ^ (s1 + s2);
                s2 = ch + s1 + s2 + (s2 << 5) + 3;
            }

            // Process suffix
            if (suffix)
            {
                for (int i = 0; i < suffixBytes.Length; ++i)
                {
                    // Retrieve current suffix char
                    ch = suffixBytes[i];

                    // Hash calculation
                    s1 = cryptTable[typeB + ch] ^ (s1 + s2);
                    s2 = ch + s1 + s2 + (s2 << 5) + 3;
                }
            }
            if (s1 == hashBLookup)
            {
                // Populate foundNameCharsetIndexes and return
                for (int i = 0; i < charsetIndexes.Height; ++i)
                {
                    foundNameCharsetIndexes[i] = charsetIndexes[new Index2(index.X, i)];
                }
                return;
            }
        }

        // Move to next name in the batch (brute force increment)
        // If we are AT the last char of the charset
        if (charsetIndexes[new Index2(index.X, generatedCharIndex)] == charset.Length - 1)
        {
            bool increaseNameSize = false;

            // Go through all the charset indexes in reverse order
            int stopValue = generatedCharIndex - batchCharCount + 1;
            if (firstBatch)
            {
                stopValue = 0;
            }
            for (int i = generatedCharIndex; i >= stopValue; --i)
            {
                // Retrieve the current char of the string
                Index2 idx = new Index2(index.X, i);

                // If we are at the last char of the charset then go back to the first char
                if (charsetIndexes[idx] == charset.Length - 1)
                {
                    charsetIndexes[idx] = 0;
                    if (i == 0)
                    {
                        increaseNameSize = true;
                    }

                    // Go back in the precalc seeds (to recalculate since the char changed)
                    if (precalcSeedIndex > 0)
                    {
                        precalcSeedIndex--;
                    }
                }
                // If we are not at the last char of the charset then move to next char
                else
                {
                    charsetIndexes[idx]++;
                    break;
                }
            }
            if (increaseNameSize)
            {
                // Increase name size by one char
                generatedCharIndex++;
                charsetIndexes[new Index2(index.X, generatedCharIndex)] = 0;
            }
        }
        // If the generated char is within the charset
        else
        {
            // Move to next char
            charsetIndexes[new Index2(index.X, generatedCharIndex)]++;
        }
        nameCount--;
    }
}
/// <summary>
/// Evaluates one individual's expression tree for a single data row.
/// Walks the individual's node slice, computing each node's value from its
/// branch nodes, and returns the value of the node flagged as root.
/// </summary>
/// <param name="individualIndex">Index of the individual whose node slice (via
/// <paramref name="nodeArrayStarts"/>) is evaluated.</param>
/// <param name="independentsRowIndex">Row of <paramref name="independents"/>
/// supplying the independent-variable values.</param>
/// <param name="independents">2D view of independent-variable values.</param>
/// <param name="nodes">Flat view of all individuals' nodes; node values are
/// written in place.</param>
/// <param name="nodeArrayStarts">Start offsets of each individual's nodes.</param>
/// <returns>The root node's value, or NaN if evaluation produced NaN or no
/// root node was reached.</returns>
static double Evaluate(int individualIndex, int independentsRowIndex, ArrayView2D <double> independents, ArrayView <NodeGPU> nodes, ArrayView <int> nodeArrayStarts)
{
    for (int nodeIndex = 0; nodeIndex < nodeArrayStarts.Length; nodeIndex++)
    {
        Index1 currentNodeIndex = new Index1(nodeArrayStarts[individualIndex] + nodeIndex);

        if (nodes[currentNodeIndex].IndependentIndex >= 0)
        {
            // Leaf node: load the independent-variable value for this row.
            int independentIndex = nodes[currentNodeIndex].IndependentIndex;
            nodes[currentNodeIndex].Number = independents[independentsRowIndex, independentIndex];
        }
        else if (nodes[currentNodeIndex].OperatorIndex >= 0)
        {
            Index1 branchIndex1 = new Index1(nodeArrayStarts[individualIndex] + nodes[currentNodeIndex].Branch1);
            Index1 branchIndex2 = new Index1(nodeArrayStarts[individualIndex] + nodes[currentNodeIndex].Branch2);

            if (nodes[currentNodeIndex].OperatorIndex < 6)
            {
                // Binary arithmetic operators (2..5).
                if (nodes[currentNodeIndex].OperatorIndex < 4)
                {
                    if (nodes[currentNodeIndex].OperatorIndex == 2)
                    {
                        nodes[currentNodeIndex].Number = nodes[branchIndex1].Number + nodes[branchIndex2].Number;
                    }
                    else if (nodes[currentNodeIndex].OperatorIndex == 3)
                    {
                        nodes[currentNodeIndex].Number = nodes[branchIndex1].Number - nodes[branchIndex2].Number;
                    }
                }
                else
                {
                    if (nodes[currentNodeIndex].OperatorIndex == 4)
                    {
                        nodes[currentNodeIndex].Number = nodes[branchIndex1].Number * nodes[branchIndex2].Number;
                    }
                    else if (nodes[currentNodeIndex].OperatorIndex == 5)
                    {
                        nodes[currentNodeIndex].Number = nodes[branchIndex1].Number / nodes[branchIndex2].Number;
                    }
                }
            }
            else if (nodes[currentNodeIndex].OperatorIndex >= 6 && nodes[currentNodeIndex].OperatorIndex <= 15)
            {
                // Unary operators (plus Pow, which uses both branches).
                if (nodes[currentNodeIndex].OperatorIndex == 6)
                {
                    nodes[currentNodeIndex].Number = -nodes[branchIndex1].Number;
                }
                else if (nodes[currentNodeIndex].OperatorIndex == 8)
                {
                    nodes[currentNodeIndex].Number = XMath.Sin(nodes[branchIndex1].Number);
                }
                else if (nodes[currentNodeIndex].OperatorIndex == 9)
                {
                    nodes[currentNodeIndex].Number = XMath.Cos(nodes[branchIndex1].Number);
                }
                else if (nodes[currentNodeIndex].OperatorIndex == 14)
                {
                    nodes[currentNodeIndex].Number = XMath.Pow(nodes[branchIndex1].Number, nodes[branchIndex2].Number);
                }
                else if (nodes[currentNodeIndex].OperatorIndex == 15)
                {
                    nodes[currentNodeIndex].Number = XMath.Sign(nodes[branchIndex1].Number);
                }
            }
            else
            {
                // Conditional operators (18..22) using up to four branches.
                Index1 branchIndex3 = new Index1(nodeArrayStarts[individualIndex] + nodes[currentNodeIndex].Branch3);
                Index1 branchIndex4 = new Index1(nodeArrayStarts[individualIndex] + nodes[currentNodeIndex].Branch4);

                if (nodes[currentNodeIndex].OperatorIndex == 18)
                {
                    // if (b1 == b2) then b3 else b4
                    if (nodes[branchIndex1].Number == nodes[branchIndex2].Number)
                    {
                        nodes[currentNodeIndex].Number = nodes[branchIndex3].Number;
                    }
                    else
                    {
                        nodes[currentNodeIndex].Number = nodes[branchIndex4].Number;
                    }
                }
                else if (nodes[currentNodeIndex].OperatorIndex == 19)
                {
                    // if (b1 < b2) then b3 else b4
                    if (nodes[branchIndex1].Number < nodes[branchIndex2].Number)
                    {
                        nodes[currentNodeIndex].Number = nodes[branchIndex3].Number;
                    }
                    else
                    {
                        nodes[currentNodeIndex].Number = nodes[branchIndex4].Number;
                    }
                }
                else if (nodes[currentNodeIndex].OperatorIndex == 20)
                {
                    // if (b1 <= b2) then b3 else b4
                    if (nodes[branchIndex1].Number <= nodes[branchIndex2].Number)
                    {
                        nodes[currentNodeIndex].Number = nodes[branchIndex3].Number;
                    }
                    else
                    {
                        nodes[currentNodeIndex].Number = nodes[branchIndex4].Number;
                    }
                }
                else if (nodes[currentNodeIndex].OperatorIndex == 21)
                {
                    // if (b1 == 0) then b2 else b3
                    if (nodes[branchIndex1].Number == 0)
                    {
                        nodes[currentNodeIndex].Number = nodes[branchIndex2].Number;
                    }
                    else
                    {
                        nodes[currentNodeIndex].Number = nodes[branchIndex3].Number;
                    }
                }
                else if (nodes[currentNodeIndex].OperatorIndex == 22)
                {
                    // if (b1 == 1) then b2 else b3
                    if (nodes[branchIndex1].Number == 1)
                    {
                        nodes[currentNodeIndex].Number = nodes[branchIndex2].Number;
                    }
                    else
                    {
                        nodes[currentNodeIndex].Number = nodes[branchIndex3].Number;
                    }
                }
            }

            // BUG FIX: the original tested 'Number == double.NaN', which is
            // always false under IEEE 754 (NaN never compares equal to
            // anything, including itself). Use double.IsNaN to bail out
            // early on NaN results as intended.
            if (double.IsNaN(nodes[currentNodeIndex].Number))
            {
                return double.NaN;
            }
        }

        if (nodes[currentNodeIndex].IsRoot == 1)
        {
            return nodes[currentNodeIndex].Number;
        }
    }
    return double.NaN;
}
/// <summary> /// Performs the first radix-sort pass. /// </summary> /// <typeparam name="T">The element type.</typeparam> /// <typeparam name="TOperation">The radix-sort operation.</typeparam> /// <typeparam name="TSpecialization">The specialization type.</typeparam> /// <param name="view">The input view to use.</param> /// <param name="counter">The global counter view.</param> /// <param name="groupSize">The number of threads in the group.</param> /// <param name="numGroups">The number of virtually launched groups.</param> /// <param name="paddedLength">The padded length of the input view.</param> /// <param name="shift">The bit shift to use.</param> internal static void RadixSortKernel1 <T, TOperation, TSpecialization>( ArrayView <T> view, ArrayView <int> counter, SpecializedValue <int> groupSize, int numGroups, int paddedLength, int shift) where T : unmanaged where TOperation : struct, IRadixSortOperation <T> where TSpecialization : struct, IRadixSortSpecialization { TSpecialization specialization = default; var scanMemory = SharedMemory.Allocate <int>( groupSize * specialization.UnrollFactor); int gridIdx = Grid.IdxX; for ( int i = Grid.GlobalIndex.X; i < paddedLength; i += GridExtensions.GridStrideLoopStride) { bool inRange = i < view.Length; // Read value from global memory TOperation operation = default; T value = operation.DefaultValue; if (inRange) { value = view[i]; } var bits = operation.ExtractRadixBits( value, shift, specialization.UnrollFactor - 1); for (int j = 0; j < specialization.UnrollFactor; ++j) { scanMemory[Group.IdxX + groupSize * j] = 0; } if (inRange) { scanMemory[Group.IdxX + groupSize * bits] = 1; } Group.Barrier(); for (int j = 0; j < specialization.UnrollFactor; ++j) { var address = Group.IdxX + groupSize * j; scanMemory[address] = GroupExtensions.ExclusiveScan <int, AddInt32>(scanMemory[address]); } Group.Barrier(); if (Group.IdxX == Group.DimX - 1) { // Write counters to global memory for (int j = 0; j < specialization.UnrollFactor; 
++j) { ref var newOffset = ref scanMemory[Group.IdxX + groupSize * j]; newOffset += Utilities.Select(inRange & j == bits, 1, 0); counter[j * numGroups + gridIdx] = newOffset; } } Group.Barrier(); var gridSize = gridIdx * Group.DimX; Index1 pos = gridSize + scanMemory[Group.IdxX + groupSize * bits] - Utilities.Select(inRange & Group.IdxX == Group.DimX - 1, 1, 0); for (int j = 1; j <= bits; ++j) { pos += scanMemory[groupSize * j - 1] + Utilities.Select(j - 1 == bits, 1, 0); } // Pre-sort the current value into the corresponding segment if (inRange) { view[pos] = value; } Group.Barrier(); gridIdx += Grid.DimX; }
internal static void DebugAssertMessageKernel(
    Index1 index,
    ArrayView <int> data)
{
    // Fires an assertion with a message when the element is negative.
    int value = data[index];
    Debug.Assert(value >= 0, "Invalid kernel argument");
}
internal static void DebugAssertKernel(
    Index1 index,
    ArrayView <int> data)
{
    // Fires an assertion when the element is negative.
    int value = data[index];
    Debug.Assert(value >= 0);
}
/// <summary>
/// The actual unique kernel implementation.
/// </summary>
/// <typeparam name="T">The element type.</typeparam>
/// <typeparam name="TComparisonOperation">The comparison operation.</typeparam>
/// <param name="input">The input view.</param>
/// <param name="output">The output view to store the new length.</param>
/// <param name="sequentialGroupExecutor">
/// The sequential group executor to use.
/// </param>
/// <param name="tileSize">The tile size.</param>
/// <param name="numIterationsPerGroup">
/// The number of iterations per group.
/// </param>
internal static void UniqueKernel <T, TComparisonOperation>(
    ArrayView <T> input,
    ArrayView <long> output,
    SequentialGroupExecutor sequentialGroupExecutor,
    SpecializedValue <int> tileSize,
    Index1 numIterationsPerGroup)
    where T : unmanaged
    where TComparisonOperation : struct, IComparisonOperation <T>
{
    TComparisonOperation comparison = default;
    var isFirstGrid = Grid.IdxX == 0;
    var tileInfo = new TileInfo <T>(input, numIterationsPerGroup);

    // Sync groups and wait for the current one to become active
    sequentialGroupExecutor.Wait();

    // temp[i] == true marks an element distinct from its predecessor.
    var temp = SharedMemory.Allocate <bool>(tileSize);
    var startIdx = Grid.ComputeGlobalIndex(Grid.IdxX, 0);

    for (
        int i = tileInfo.StartIndex;
        i < tileInfo.MaxLength;
        i += Group.DimX)
    {
        if (Group.IsFirstThread && i == tileInfo.StartIndex && isFirstGrid)
        {
            // The very first element overall is always kept.
            temp[0] = true;
        }
        else
        {
            var currIdx = i;
            // At a tile boundary, the predecessor is the last element kept
            // by the previous group (output[0] - 1).
            var prevIdx = Group.IsFirstThread && i == tileInfo.StartIndex
                ? output[0] - 1
                : currIdx - 1;

            temp[currIdx - startIdx] =
                comparison.Compare(input[currIdx], input[prevIdx]) != 0;
        }
    }
    Group.Barrier();

    if (Group.IsFirstThread)
    {
        // Compact the distinct elements in place, continuing from the
        // running length produced by earlier groups.
        var offset = isFirstGrid ? 0 : output[0];
        var maxLength =
            XMath.Min(startIdx + temp.IntLength, tileInfo.MaxLength) - startIdx;
        for (var i = 0; i < maxLength; i++)
        {
            if (temp[i])
            {
                input[offset++] = input[startIdx + i];
            }
        }
        output[0] = offset;
    }
    // Make the in-place writes visible before releasing the next group.
    MemoryFence.DeviceLevel();
    Group.Barrier();
    sequentialGroupExecutor.Release();
}
internal static void ArrayViewLengthInBytesKernel(
    Index1 index,
    ArrayView <long> data)
{
    // Each thread records the view's total size in bytes.
    long byteLength = data.LengthInBytes;
    data[index] = byteLength;
}
internal static void SpanKernel(Index1 index, ArrayView <int, LongIndex1> data)
{
    // Decrement this thread's element by 5.
    data[index] -= 5;
}
/// <summary>
/// Brute-force kernel that hashes batches of candidate strings.
/// Each thread owns one row of <paramref name="charsetIndexes"/> and walks
/// its candidate space until <paramref name="nameCount"/> is exhausted or a
/// name matching both lookup hashes is found. Unlike the "Optimized"
/// variant, every candidate is hashed from scratch (no seed reuse).
/// </summary>
public static void HashStringsBatch(
    Index1 index,
    ArrayView <byte> charset,          // 1D array holding the charset bytes
    ArrayView <uint> cryptTable,       // 1D array crypt table used for hash computation
    ArrayView2D <int> charsetIndexes,  // 2D array containing the char indexes of one batch string seed (one string per line, hashes will be computed starting from this string)
    ArrayView <byte> suffixBytes,      // 1D array holding the indexes of the suffix chars
    uint hashALookup,                  // The hash A that we are looking for
    uint hashBLookup,                  // The hash B that we are looking for
    uint seed1a,                       // Pre-computed hash A seed 1 for the string prefix
    uint seed2a,                       // Pre-computed hash A seed 2 for the string prefix
    uint seed1b,                       // Pre-computed hash B seed 1 for the string prefix
    uint seed2b,                       // Pre-computed hash B seed 2 for the string prefix
    bool firstBatch,
    int nameCount,                     // Name count limit (used as return condition)
    int batchCharCount,                // Number of generated chars in the batch
    ArrayView <int> foundNameCharsetIndexes // 1D array containing the found name (if found)
)
{
    // Brute force increment variables
    int generatedCharIndex = 0;

    // Hash variables
    uint ch;            // Current char of the processed string
    int typeA = 0x100;  // Hash type A
    int typeB = 0x200;  // Hash type B

    // A leading zero byte means "no suffix".
    bool suffix = true;
    if (suffixBytes[0] == 0)
    {
        suffix = false;
    }

    // Brute force increment preparation
    // Increase name count to !numChars-1 for first batch first name seed
    if (firstBatch && index == 0)
    {
        nameCount = -1;
        for (int i = 1; i <= batchCharCount; i++)
        {
            int temp = 1;
            for (int j = 0; j < i; j++)
            {
                temp *= (int)charset.Length;
            }
            nameCount += temp;
            // NOTE(review): the last length term is added twice on purpose?
            // Both batch kernels share this pattern — confirm intent.
            if (i == batchCharCount)
            {
                temp = 1;
                for (int j = 0; j < i; j++)
                {
                    temp *= (int)charset.Length;
                }
                nameCount += temp;
            }
        }
    }

    // Find the position of the last generated char (-1 marks end of string)
    for (int i = 0; i < charsetIndexes.Height; i++)
    {
        Index2 idx = new Index2(index.X, i);
        int charIndex = charsetIndexes[idx];
        if (charsetIndexes[idx] == -1)
        {
            generatedCharIndex = i - 1;
            break;
        }
    }

    // For each name compute hash
    while (nameCount != 0)
    {
        // Full hash A computation from the prefix seeds
        uint s1 = seed1a;
        uint s2 = seed2a;
        for (int i = 0; i < charsetIndexes.Height; i++)
        {
            // Retrieve the current char of the string
            Index1 charsetIdx = charsetIndexes[new Index2(index.X, i)];
            if (charsetIdx == -1) // break if end of the string is reached
            {
                break;
            }
            ch = charset[charsetIdx];

            // Hash calculation
            s1 = cryptTable[typeA + ch] ^ (s1 + s2);
            s2 = ch + s1 + s2 + (s2 << 5) + 3;
        }

        // Process suffix
        if (suffix)
        {
            for (int i = 0; i < suffixBytes.Length; i++)
            {
                // Retrieve current suffix char
                ch = suffixBytes[i];

                // Hash calculation
                s1 = cryptTable[typeA + ch] ^ (s1 + s2);
                s2 = ch + s1 + s2 + (s2 << 5) + 3;
            }
        }

        // Check if it matches the hash that we are looking for
        if (s1 == hashALookup)
        {
            // Recompute the full hash B from the prefix seeds
            s1 = seed1b;
            s2 = seed2b;
            for (int i = 0; i < charsetIndexes.Height; i++)
            {
                // Retrieve the current char of the string
                Index1 charsetIdx = charsetIndexes[new Index2(index.X, i)];
                if (charsetIdx == -1) // break if end of the string is reached
                {
                    break;
                }
                ch = charset[charsetIdx];

                // Hash calculation
                s1 = cryptTable[typeB + ch] ^ (s1 + s2);
                s2 = ch + s1 + s2 + (s2 << 5) + 3;
            }

            // Process suffix
            if (suffix)
            {
                for (int i = 0; i < suffixBytes.Length; i++)
                {
                    // Retrieve current suffix char
                    ch = suffixBytes[i];

                    // Hash calculation
                    s1 = cryptTable[typeB + ch] ^ (s1 + s2);
                    s2 = ch + s1 + s2 + (s2 << 5) + 3;
                }
            }
            if (s1 == hashBLookup)
            {
                // Populate foundNameCharsetIndexes and return
                for (int i = 0; i < charsetIndexes.Height; i++)
                {
                    foundNameCharsetIndexes[i] = charsetIndexes[new Index2(index.X, i)];
                }
                return;
            }
        }

        // Move to next name in the batch (brute force increment)
        // If we are AT the last char of the charset
        if (charsetIndexes[new Index2(index.X, generatedCharIndex)] == charset.Length - 1)
        {
            bool increaseNameSize = false;

            // Go through all the charset indexes in reverse order
            int stopValue = generatedCharIndex - batchCharCount + 1;
            if (firstBatch)
            {
                stopValue = 0;
            }
            for (int i = generatedCharIndex; i >= stopValue; --i)
            {
                // Retrieve the current char of the string
                Index2 idx = new Index2(index.X, i);

                // If we are at the last char of the charset then go back to the first char
                if (charsetIndexes[idx] == charset.Length - 1)
                {
                    charsetIndexes[idx] = 0;
                    if (i == 0)
                    {
                        increaseNameSize = true;
                    }
                }
                // If we are not at the last char of the charset then move to next char
                else
                {
                    charsetIndexes[idx]++;
                    break;
                }
            }
            if (increaseNameSize)
            {
                // Increase name size by one char
                generatedCharIndex++;
                charsetIndexes[new Index2(index.X, generatedCharIndex)] = 0;
            }
        }
        // If the generated char is within the charset
        else
        {
            // Move to next char
            charsetIndexes[new Index2(index.X, generatedCharIndex)]++;
        }
        nameCount--;
    }
}
/// <summary>
/// Computes the sequence element for the corresponding
/// <paramref name="sequenceIndex"/>.
/// </summary>
/// <param name="sequenceIndex">The sequence index for the computation of the corresponding value.</param>
/// <returns>The computed sequence value.</returns>
public CustomStruct ComputeSequenceElement(Index1 sequenceIndex)
{
    var element = new CustomStruct()
    {
        First = sequenceIndex,
        Second = SecondOffset + sequenceIndex,
    };
    return element;
}
internal static void Index1EntryPointKernel(Index1 index, ArrayView <int> output)
{
    // Each thread writes its own global thread index.
    output[index] = index;
}
internal static void ArrayViewLengthKernel(
    Index1 index,
    ArrayView <int> data)
{
    // Each thread records the view's 32-bit length.
    int length = data.IntLength;
    data[index] = length;
}
/// <summary>
/// Stores the given value at the given index.
/// </summary>
/// <typeparam name="T">The element type.</typeparam>
/// <param name="array">The target array.</param>
/// <param name="value">The value to set.</param>
/// <param name="index">The element index.</param>
public static void SetValue <T>(this T[] array, T value, Index1 index)
{
    array[index] = value;
}
internal static void ArrayViewLongLengthKernel(
    Index1 index,
    ArrayView <long> data)
{
    // Each thread records the view's 64-bit length.
    long length = data.Length;
    data[index] = length;
}
internal static void CopyKernel(
    Index1 index,
    ArrayView <long, LongIndex1> data)
{
    // Decrement this thread's element by 5.
    data[index] = data[index] - 5;
}
/// <summary>
/// Copies the requested contents into a new byte array using the
/// accelerator's default stream.
/// </summary>
/// <param name="byteOffset">The offset in bytes.</param>
/// <param name="byteExtent">The extent in bytes.</param>
/// <returns>A new array segment holding the requested contents.</returns>
internal ArraySegment <byte> GetAsDebugRawArray(Index1 byteOffset, Index1 byteExtent)
{
    return GetAsRawArray(Accelerator.DefaultStream, byteOffset, byteExtent);
}
static void Main(string[] args) { string path = @"F:\邹静\各门课资料\论文\毕业论文\论文\第二次试验过程\数据库.shp"; string path1 = @"F:\邹静\各门课资料\论文\毕业论文\论文\先简化后缩放\简化5米\A5\A5.shp"; ////string path = @"H:\test\质心在外_终.shp"; ////string path1 = @"H:\test\简化_0501.shp"; Collection <IFeature> features = new SuperShpReader(path).ReadAll(); Collection <IFeature> features1 = new SuperShpReader(path1).ReadAll(); //////////////最大线 ////LineStringOutput.maxLineOutput(features1); ////////////////所有线 //////////LineStringOutput.linesOuput(features); //////////////线与外圆相交后的多边形 ////LineStringOutput.outPolygonOuput(features1); ////////////// 最大圆 ////LineStringOutput.maxCircleOutput(features1); REngine.SetEnvironmentVariables(); // <-- May be omitted; the next line would call it. REngine engine = REngine.GetInstance(); engine.Initialize(); for (int j = 0; j < 1; j++) { Console.WriteLine("第{0}个参数", j); int symbol = j; IList <int> count = new List <int>(); IList <int> count1 = new List <int>(); IList <double> rowRes = new List <double>(); IList <double> rowRes0 = new List <double>(); IList <ILineString> maxLine = new List <ILineString>(); IList <ILineString> maxLine1 = new List <ILineString>(); IList <double> maxLines = new List <double>(); //int w = 0; for (int q = 0; q < features.Count; q++) { //maxLine.Add(MaxPoints.getMaxVector(features[q])); //double circle = Circles.getCircle(maxLine[q].Coordinates[0], maxLine[q].Coordinates[1]).Length; //maxLines.Add(MaxPoints.getMaxVector(features[q]).Length / circle); Index1 row1 = new Index1(features[q]); int ab = 0; foreach (var item in row1.writeToCSV1(q, symbol)) { rowRes.Add(item); ab++; } count1.Add(ab); count.Add(ab); } #region // //for (int w = 0; w < count[q]; w++) // //{ // for (int v = q; v < count.Count; v++) // { // count[v] = count[v] +w; // double max = rowRes[w]; // double min = rowRes[w]; // for (int L = w; L < count[v]; L++) // { // if (max < rowRes[L]) // { // max = rowRes[L]; // } // if (min > rowRes[L]) // { // min = rowRes[L]; // } // } // for (int k 
= w; k < count[v]; k++) // { // if (rowRes[k] != 0) // { // rowRes[k] = (rowRes[k] - min) / (max - min); // } // } // w = count[v]; // } //} //double maxLength = 0; #endregion for (int i = 0; i < features1.Count; i++) { //maxLine1.Add(MaxPoints.getMaxVector(features1[i])); //double circle1 = Circles.getCircle(maxLine1[i].Coordinates[0], maxLine1[i].Coordinates[1]).Length; //maxLength = MaxPoints.getMaxVector(features1[i]).Length / circle1; IList <double> rowRes1 = new List <double>(); Index1 row = new Index1(features1[i]); foreach (var item in row.writeToCSV1(i, symbol)) { rowRes1.Add(item); } //double max = rowRes1[0]; //double min = rowRes1[0]; //for (int k = 1; k < rowRes1.Count; k++) //{ // if (max < rowRes1[k]) // { // max = rowRes1[k]; // } // if (min > rowRes1[k]) // { // min = rowRes1[k]; // } //} //for (int k = 0; k < rowRes1.Count; k++) //{ // rowRes1[k] = (rowRes1[k] - min) / (max - min); //} RDotNet.NumericVector V1 = engine.CreateNumericVector(rowRes1); engine.SetSymbol("V1", V1); int a = 0; IList <double> pValue = new List <double>(); for (int m = 0; m < features.Count; m++) { //if (maxLines[m] - 1 <= maxLength && maxLength <= maxLines[m] + 1) //{ IList <double> rowRes2 = new List <double>(); int n = 0; for (int b = m; b < count1[m] + m; b++) { rowRes2.Add(rowRes[b + a]); //要把第一个的去掉 n++; } a = a + n - 1; #region //double max1 = rowRes2[0]; //double min1 = rowRes2[0]; //for (int c = 0; c < rowRes2.Count; c++) //{ // if (max1 < rowRes2[c]) // { // max1 = rowRes2[c]; // } // if (min1 > rowRes2[c]) // { // min1 = rowRes2[c]; // } //} //for (int k1 = 0; k1 < rowRes2.Count; k1++) //{ // rowRes2[k1] = (rowRes2[k1] - min1) / (max1 - min1); //} #endregion RDotNet.NumericVector V2 = engine.CreateNumericVector(rowRes2); engine.SetSymbol("V2", V2); //GenericVector testRes = engine.Evaluate("wilcox.test(V1,V2, paired = FALSE)").AsList(); if (V1.Length == V2.Length) { //GenericVector testRes = engine.Evaluate("cor.test(V1,V2)").AsList(); //double p = 
testRes["p.value"].AsNumeric().First(); ////using (StreamWriter sw = new StreamWriter(@"C:\Users\Administrator\Desktop\result\shiyan0.txt", true)) ////{ //// sw.Write("{0}与{1}的P-Value={2}\r\n", i, m, p); ////} double sum = 0; double v1Power = 0; double v2Power = 0; for (int d = 0; d < V1.Length; d++) { sum += V1[d] * V2[d]; v1Power += Math.Pow(V1[d], 2); v2Power += Math.Pow(V2[d], 2); } double p = sum / (Math.Sqrt(v1Power) * Math.Sqrt(v2Power)); ////////string path_result= string.Format(@"H:\test\experiment\result\simplify2_result{0}.txt", i); //输出所有P值 using (StreamWriter sw = new StreamWriter(@"F:\邹静\各门课资料\论文\毕业论文\论文\先简化后缩放\简化5米\A5\A5.txt", true)) { sw.Write("{0}与{1}的P-Value={2}\r\n", i, m, p); //pValue.Add(p); } #region //输出V1,V2 //using (StreamWriter sw = new StreamWriter(@"H:\test\experiment\v1ceshi.txt", true)) //{ // foreach (var item in V1) // { // sw.Write(item + "\r\n"); // } // sw.Write("第{0}个参数换\r\n", j); //} //using (StreamWriter sw = new StreamWriter(@"H:\test\experiment\v2ceshi.txt", true)) //{ // foreach (var item in V2) // { // sw.Write(item + "\r\n"); // } // sw.Write("第{0}个参数换\r\n", j); //} #endregion } else { continue; } //} //else //{ // a += count[m] - 1; //} } #region //double max = pValue[0]; //for (int aa = 0; aa < pValue.Count; aa++) //{ // if (pValue[aa] > max) // { // max = pValue[aa]; // } //} //using (StreamWriter sw = new StreamWriter(@"F:\邹静\各门课资料\论文\毕业论文\论文\先简化后缩放\简化3米\A1\A1_最大值.txt", true)) //{ // sw.Write("{0}与{1}的最大值={2}\r\n", i,m, max); //} #endregion } } Console.WriteLine("成功"); engine.Dispose(); #region //实验二 //REngine.SetEnvironmentVariables(); // <-- May be omitted; the next line would call it. 
//REngine engine = REngine.GetInstance(); ////////engine.Initialize(); //for (int j = 1; j < 4; j++) //{ // Console.WriteLine("第{0}个参数", j); // int symbol = j; // IList<int> count = new List<int>(); // IList<double> rowRes = new List<double>(); // IList<ILineString> maxLine = new List<ILineString>(); // IList<double> maxLines = new List<double>(); // for (int q = 0; q < features.Count; q++) // { // maxLine.Add(MaxPoints.getMaxVector(features[q])); // double circle = Circles.getCircle(maxLine[q].Coordinates[0], maxLine[q].Coordinates[1]).Length; // maxLines.Add(MaxPoints.getMaxVector(features[q]).Length / circle); // Index1 row1 = new Index1(features[q]); // int a = 0; // foreach (var item in row1.writeToCSV1(q, symbol)) // { // rowRes.Add(item); // a++; // } // count.Add(a); // double max = 0; // double min = 0; // for (int l = 1; l < rowRes.Count; l++) // { // if (max < rowRes[l]) // { // max = rowRes[l]; // } // if (min > rowRes[l]) // { // min = rowRes[l]; // } // } // for (int k = 0; k < rowRes.Count; k++) // { // if (rowRes[k] != 0) // { // rowRes[k] = (rowRes[k] - min) / (max - min); // } // } // } // double maxLength = 0; // for (int i = 0; i < features1.Count; i++) // { // maxLine.Add(MaxPoints.getMaxVector(features1[i])); // double circle = Circles.getCircle(maxLine[i].Coordinates[0], maxLine[i].Coordinates[1]).Length; // maxLength = MaxPoints.getMaxVector(features1[i]).Length / circle; // IList<double> rowRes1 = new List<double>(); // Index1 row = new Index1(features1[i]); // foreach (var item in row.writeToCSV1(i, symbol)) // { // rowRes1.Add(item); // } // double max = rowRes1[0]; // double min = rowRes1[0]; // for (int k = 1; k < rowRes1.Count; k++) // { // if (max < rowRes1[k]) // { // max = rowRes1[k]; // } // if (min > rowRes1[k]) // { // min = rowRes1[k]; // } // } // for (int k = 0; k < rowRes1.Count; k++) // { // if (rowRes1[k] != 0) // { // rowRes1[k] = (rowRes1[k] - min) / (max - min); // } // } // RDotNet.NumericVector V1 = 
engine.CreateNumericVector(rowRes1); // engine.SetSymbol("V1", V1); // int a = 0; // for (int m = 0; m < features.Count; m++) // { // if (maxLines[m] - 1 <= maxLength && maxLength <= maxLines[m] + 1) // { // IList<double> rowRes2 = new List<double>(); // int n = 0; // for (int b = m; b < count[m] + m; b++) // { // rowRes2.Add(rowRes[b + a]); //要把第一个的去掉 // n++; // } // a = a + n - 1; // RDotNet.NumericVector V2 = engine.CreateNumericVector(rowRes2); // engine.SetSymbol("V2", V2); // //GenericVector testRes = engine.Evaluate("wilcox.test(V1,V2, paired = FALSE)").AsList(); // using (StreamWriter sw = new StreamWriter(@"H:\test\V1.txt", true)) // { // foreach (var item in V1) // { // sw.Write(item); // } // sw.Write("第{0}个参数换\r\n", j); // } // using (StreamWriter sw = new StreamWriter(@"H:\test\V2.txt", true)) // { // foreach (var item in V2) // { // sw.Write(item); // } // sw.Write("第{0}个参数换\r\n", j); // } // if (V1.Length == V2.Length) // { // GenericVector testRes = engine.Evaluate("t.test(V1,V2, paired = TRUE)").AsList(); // double p = testRes["p.value"].AsNumeric().First(); // //using (StreamWriter sw = new StreamWriter(@"C:\Users\Administrator\Desktop\result\shiyan0.txt", true)) // //{ // // sw.Write("{0}与{1}的P-Value={2}\r\n", i, m, p); // //} // using (StreamWriter sw = new StreamWriter(@"H:\test\shiyan0.txt", true)) // { // sw.Write("{0}与{1}的P-Value={2}\r\n", i, m, p); // } // } // else // { // continue; // } // } // else // { // a += count[m] - 1; // } // } // } //} //Console.WriteLine("成功"); #endregion ////最小线 //LineStringOutput.minLineOutput(features); //////最大线 //LineStringOutput.maxLineOutput(features); //LineStringOutput.maxLineOutput(features1); ////所有线 //LineStringOutput.linesOuput(features); //////最小圆 ////LineStringOutput.minCircleOutput(features); ////最大圆 //LineStringOutput.maxCircleOutput(features); ////线与内圆相交后的多边形 //LineStringOutput.intPolygonOuput(features); ////线与外圆相交后的多边形 //LineStringOutput.outPolygonOuput(features1); ////线与小班相交后的多边形 
//LineStringOutput.minPolygonOuput(features); ////输出每隔五度时的每段多边形 //LineStringOutput.interBoundaryOuput(features); //// 输出.csv //string path3 = @"H:\test\结果\ads3.csv"; //int a = 0; //for (int i = 0; i < features1.Count; i++) //{ // a++; // path3 = @"H:\test\"; // path = path3 + "\\" + a + ".csv"; // Index1 indexs = new Index1(features1[i]); // indexs.writeToCSV2(path); //} #region //REngine.SetEnvironmentVariables(); // <-- May be omitted; the next line would call it. //REngine engine = REngine.GetInstance(); ////////engine.Initialize(); //for (int j = 3; j < 4; j++) //{ // Console.WriteLine("第{0}个参数", j); // int symbol = j; // for (int i = 0; i < features.Count; i++) // { // Index1 row = new Index1(features[i]); // IList<double> rowRes = new List<double>(); // foreach (var item in row.writeToCSV1(i, symbol)) // { // rowRes.Add(item); // } // for (int k = i; k < features.Count - 1; k++) // { // Index1 row1 = new Index1(features[k + 1]); // IList<double> rowRes1 = new List<double>(); // foreach (var item in row1.writeToCSV1(k + 1, symbol)) // { // rowRes1.Add(item); // } // RDotNet.NumericVector V1 = engine.CreateNumericVector(rowRes); // engine.SetSymbol("V1", V1); // RDotNet.NumericVector V2 = engine.CreateNumericVector(rowRes1); // engine.SetSymbol("V2", V2); // GenericVector testRes = engine.Evaluate("wilcox.test(V1,V2, paired = FALSE)").AsList(); // double p = testRes["p.value"].AsNumeric().First(); // //Console.WriteLine("Group1: [{0}]", string.Join(", ", V1)); // //Console.WriteLine("Group2: [{0}]", string.Join(", ", V2)); // //engine.Evaluate("source('H:/test/结果/表格/R.r')"); // //Console.WriteLine("{0}与{1}的P-value = {2}", i, k + 1, p); // using (StreamWriter sw = new StreamWriter(@"C:\zj\result\result3.txt", true)) // { // sw.Write("{0}与{1}的P-Value={2}\r\n", i, k + 1, p); // } // } // } //} //Console.ReadKey(); ////////engine.Dispose(); #endregion #region //输出最长最短比和夹角 //IList<ILineString> maxLines = new List<ILineString>(); //IList<ILineString> minLines = new 
List<ILineString>(); //IList<double> maxMinRatio = new List<double>(); //IList<double> maxMinAngle = new List<double>(); //IList<double> maxMinAngle1 = new List<double>(); //IList<double> angle = new List<double>(); //IList<double> angle1 = new List<double>(); //IList<double> angleResult = new List<double>(); //for (int i = 0; i < features.Count; i++) //{ // maxLines.Add(MaxPoints.getMaxVector(features[i])); // minLines.Add(MinPoints.getMinVector(features[i])); // maxMinRatio.Add(maxLines[i].Length / minLines[i].Length); // double a = features[i].Geometry.Centroid.X; // double b = features[i].Geometry.Centroid.Y; // maxLines[i].Coordinates[1].X = maxLines[i].Coordinates[1].X - a; // maxLines[i].Coordinates[1].Y = maxLines[i].Coordinates[1].Y - b; // minLines[i].Coordinates[1].X = minLines[i].Coordinates[1].X - a; // minLines[i].Coordinates[1].Y = minLines[i].Coordinates[1].Y - b; // if (maxLines[i].Coordinates[1].X > 0 && maxLines[i].Coordinates[1].Y >= 0) // { // angle.Add(Math.Atan2(maxLines[i].Coordinates[1].Y, maxLines[i].Coordinates[1].X) * 180 / Math.PI); // } // else if (maxLines[i].Coordinates[1].X < 0 && maxLines[i].Coordinates[1].Y > 0) // { // angle.Add(180 - (Math.Atan2(maxLines[i].Coordinates[1].Y, -maxLines[i].Coordinates[1].X) * 180 / Math.PI)); // } // else if (maxLines[i].Coordinates[1].X < 0 && maxLines[i].Coordinates[1].Y <= 0) // { // angle.Add(180 + (Math.Atan2(-maxLines[i].Coordinates[1].Y, -maxLines[i].Coordinates[1].X) * 180 / Math.PI)); // } // else if (maxLines[i].Coordinates[1].X > 0 && maxLines[i].Coordinates[1].Y <= 0) // { // angle.Add(360 - (Math.Atan2(-maxLines[i].Coordinates[1].Y, maxLines[i].Coordinates[1].X) * 180 / Math.PI)); // } // if (minLines[i].Coordinates[1].X > 0 && minLines[i].Coordinates[1].Y >= 0) // { // angle1.Add(Math.Atan2(minLines[i].Coordinates[1].Y, minLines[i].Coordinates[1].X) * 180 / Math.PI); // } // else if (minLines[i].Coordinates[1].X < 0 && minLines[i].Coordinates[1].Y > 0) // { // angle1.Add(180 - 
(Math.Atan2(minLines[i].Coordinates[1].Y, -minLines[i].Coordinates[1].X) * 180 / Math.PI)); // } // else if (minLines[i].Coordinates[1].X < 0 && minLines[i].Coordinates[1].Y <= 0) // { // angle1.Add(180 + (Math.Atan2(-minLines[i].Coordinates[1].Y, -minLines[i].Coordinates[1].X) * 180 / Math.PI)); // } // else if (minLines[i].Coordinates[1].X > 0 && minLines[i].Coordinates[1].Y <= 0) // { // angle1.Add(360 - (Math.Atan2(-minLines[i].Coordinates[1].Y, minLines[i].Coordinates[1].X) * 180 / Math.PI)); // } // if (Math.Abs(angle[i] - angle1[i]) <= 180) // { // angleResult.Add(Math.Abs(angle[i] - angle1[i])); // } // else // { // angleResult.Add(360 - Math.Abs(angle[i] - angle1[i])); // } //} //foreach (var item in maxMinRatio) //{ // Console.WriteLine(item + "最长最短比"); //} //foreach (var item in angleResult) //{ // Console.WriteLine(item + "最长最短夹角"); //} #endregion Console.WriteLine("输出成功"); Console.Read(); }
/// <summary cref="ArrayViewSource.GetAsRawArray(
/// AcceleratorStream, Index1, Index1)"/>
protected internal override ArraySegment<byte> GetAsRawArray(
    AcceleratorStream stream,
    Index1 byteOffset,
    Index1 byteExtent)
{
    // Raw byte-array access is intentionally unsupported by this source;
    // callers must not request a raw view of this buffer.
    throw new InvalidOperationException();
}