public uint GetCounterValue()
{
    uint counterValue;
    using (var countBuffer = new RawBuffer<uint>(1))
    {
        CopyCount(countBuffer);
        counterValue = countBuffer[0];
    }
    return counterValue;
}
internal sealed override void GenerateSortKeysAndCopyReferences(
    ref TypeBatch typeBatch,
    int bundleStart, int localBundleStart, int bundleCount,
    int constraintStart, int localConstraintStart, int constraintCount,
    ref int firstSortKey, ref int firstSourceIndex, ref RawBuffer bodyReferencesCache)
{
    GenerateSortKeysAndCopyReferences<OneBodySortKeyGenerator>(
        ref typeBatch,
        bundleStart, localBundleStart, bundleCount,
        constraintStart, localConstraintStart, constraintCount,
        ref firstSortKey, ref firstSourceIndex, ref bodyReferencesCache);
}
public bool TryCompression(Table table, TableSchema schema)
{
    try
    {
        var tx = table._tx;
        int maxSpace = ZstdLib.GetMaxCompression(RawBuffer.Length);
        _compressedScope = tx.Allocator.Allocate(maxSpace + OverheadSize, out CompressedBuffer);
        Compressed = false;

        var compressionDictionary = tx.LowLevelTransaction.Environment.CompressionDictionariesHolder
            .GetCompressionDictionaryFor(tx, table.CurrentCompressionDictionaryId);

        CompressionTried = true;
        var size = ZstdLib.Compress(RawBuffer.ToReadOnlySpan(), CompressedBuffer.ToSpan(), compressionDictionary);
        size += WriteVariableSizeIntInReverse(CompressedBuffer.Ptr + size, compressionDictionary.Id);
        CompressedBuffer.Truncate(size);

        var compressionRatio = GetCompressionRatio(size, RawBuffer.Length);
        if (compressionRatio > compressionDictionary.ExpectedCompressionRatio + 10)
        {
            // Training dictionaries is expensive, so only do it when the current compressed
            // value is significantly worse than what the dictionary is expected to achieve.
            var etagTree = table.GetFixedSizeTree(schema.CompressedEtagSourceIndex);
            if (ShouldRetrain(etagTree))
            {
                MaybeTrainCompressionDictionary(table, etagTree);
            }
        }

        if (CompressedBuffer.Length >= RawBuffer.Length)
        {
            // Compression didn't save any space, so skip it for this value.
            _compressedScope.Dispose();
            // Explicitly not disposing this, we need to have the raw buffer
            // when we do update then insert and the size is too large
            // RawScope.Dispose();
            Compressed = false;
            return false;
        }

        Compressed = true;
        return true;
    }
    catch
    {
        _compressedScope.Dispose();
        RawScope.Dispose();
        throw;
    }
}
public async Task Insert(Vector3[] data)
{
    uint gx, gy, gz;
    shader.GetKernelThreadGroupSizes(computeLeavesKernel, out gx, out gy, out gz);
    int numGroupsX = Mathf.CeilToInt((float)data.Length / gx);

    using (var leaves = new StructuredBuffer<int>(data.Length))
    using (var leafCount = new RawBuffer<uint>(1))
    using (var keys = new CounterBuffer<int>(data.Length))
    using (var points = new StructuredBuffer<Vector3>(data.Length))
    {
        points.SetData(data);

        shader.SetFloats("size", bounds.size.x, bounds.size.y, bounds.size.z);
        shader.SetFloats("min_corner", bounds.min.x, bounds.min.y, bounds.min.z);
        shader.SetInt("max_depth", maxDepth);
        shader.SetInt("point_count", data.Length);

        // compute a leaf code for every input point
        shader.SetBuffer(computeLeavesKernel, "leaves", leaves.Buffer);
        shader.SetBuffer(computeLeavesKernel, "points", points.Buffer);
        shader.Dispatch(computeLeavesKernel, numGroupsX, 1, 1);

        sorter.Sort(leaves, data.Length);

        // mark unique leaves and compact them
        shader.SetBuffer(markUniqueLeavesKernel, "leaves", leaves.Buffer);
        shader.SetBuffer(markUniqueLeavesKernel, "unique", keys.Buffer);
        shader.Dispatch(markUniqueLeavesKernel, numGroupsX, 1, 1);
        compactor.Compact(leaves, keys, data.Length);

        // build the indirect dispatch arguments from the unique leaf count
        keys.CopyCount(indirectArgs);
        shader.SetBuffer(computeArgsKernel, "args", indirectArgs.Buffer);
        shader.Dispatch(computeArgsKernel, 1, 1, 1);

        keys.CopyCount(leafCount);
        shader.SetBuffer(subdivideKernel, "leaf_count", leafCount.Buffer);
        shader.SetBuffer(subdivideKernel, "leaves", leaves.Buffer);
        shader.SetBuffer(subdivideKernel, "nodes", nodes.Buffer);

        // subdivide one level at a time until max_depth is reached
        for (int i = 0; i < maxDepth; i++)
        {
            shader.SetInt("current_level", i);
            shader.DispatchIndirect(subdivideKernel, indirectArgs.Buffer);
        }

        nodeData = await nodes.GetDataAsync();
        nodeCount = (int)nodes.GetCounterValue();
    }
}
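// A minimal usage sketch for the GPU octree build above. Apart from the Unity API, everything here
// is an assumption: "GpuOctree" is a hypothetical name for whatever component exposes the Insert
// method, nodeCount, and nodeData shown above; adjust names to the real type.
using UnityEngine;

public class OctreeInsertExample : MonoBehaviour
{
    public GpuOctree octree;   // hypothetical wrapper around the Insert pipeline above
    public int pointCount = 4096;

    async void Start()
    {
        // scatter random points inside the unit cube, then build the octree on the GPU
        var points = new Vector3[pointCount];
        for (int i = 0; i < points.Length; i++)
        {
            points[i] = new Vector3(Random.value, Random.value, Random.value);
        }

        await octree.Insert(points);
        Debug.Log($"GPU octree built with {octree.nodeCount} nodes.");
    }
}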
public static void load_translation(emu_options m_options)
{
    g_translation.Clear();

    emu_file file = new emu_file(m_options.language_path(), global_object.OPEN_FLAG_READ);
    var name = m_options.language();
    name = name.Replace(" ", "_");
    name = name.Replace("(", "");
    name = name.Replace(")", "");
    if (file.open(name, global_object.PATH_SEPARATOR + "strings.mo") == osd_file.error.NONE)
    {
        uint64_t size = file.size();
        RawBuffer buffer = new RawBuffer(4 * ((int)size / 4 + 1));  //uint32_t *buffer = global_alloc_array(uint32_t, size / 4 + 1);
        file.read(new ListBytesPointer(buffer), (UInt32)size);
        file.close();

        if (buffer.get_uint32(0) != MO_MAGIC && buffer.get_uint32(0) != MO_MAGIC_REVERSED)
        {
            buffer = null;  //global_free_array(buffer);
            return;
        }

        if (buffer.get_uint32(0) == MO_MAGIC_REVERSED)
        {
            // byte-swap every 32-bit word of the catalog
            for (var i = 0; i < ((int)size / 4) + 1; ++i)
            {
                buffer.set_uint32(i, endianchange(buffer.get_uint32(i)));
            }
        }

        uint32_t number_of_strings = buffer.get_uint32(2);
        uint32_t original_table_offset = buffer.get_uint32(3) >> 2;
        uint32_t translation_table_offset = buffer.get_uint32(4) >> 2;
        RawBuffer data = buffer;  //const char *data = reinterpret_cast<const char*>(buffer);

        for (var i = 1; i < number_of_strings; ++i)
        {
            string original = "TODO original";  //(const char *)data + buffer[original_table_offset + 2 * i + 1];
            string translation = "TODO translation";  //(const char *)data + buffer[translation_table_offset + 2 * i + 1];
            g_translation.emplace(original, translation);
        }

        buffer = null;  //global_free_array(buffer);
    }
}
//----------------------------------------------------------------------------------
#region Ctor
public DynamicSizeBufferCache(int capacity)
{
    _cacheCapacity = capacity.MinAt(1);
    _items_capacity = _cacheCapacity - 1;
    _firstItem = RawBuffer.Null;

    if (_items_capacity > 0)
    {
        _items = new RawBuffer[_items_capacity];
        for (int i = 0; i < _items_capacity; i++)
        {
            _items[i] = RawBuffer.Null;
        }
    }
    else
    {
        _items = null;
    }
}
//----------------------------------------------------------------------------------
#region Remove/Clear (Large buffer)
/// <summary>
/// Remove the largest buffer.
/// Small buffers occupying the cache are removed automatically when allocating.
/// Large buffers are safe to keep for allocation, but they consume memory;
/// call this to free that memory.
/// </summary>
public void RemoveLargest()
{
    int largeID = int.MaxValue;
    RawBuffer largBuf = RawBuffer.Null;

    if (_firstItem.buf != IntPtr.Zero)
    {
        largeID = _firstID;
        largBuf = _firstItem;
    }

    for (int i = 0; i < _items_capacity; i++)
    {
        RawBuffer cur = _items[i];
        if (cur.buf != IntPtr.Zero && (largBuf.buf == IntPtr.Zero || largBuf.bSize < cur.bSize))
        {
            largeID = i;
            largBuf = cur;
        }
    }

    // nothing cached, nothing to remove
    if (largBuf.buf == IntPtr.Zero)
    {
        return;
    }

    // remove the largest
    if (largeID == _firstID)
    {
        var ptr = Interlocked.Exchange(ref _firstItem.buf, IntPtr.Zero);
        if (ptr != IntPtr.Zero)
        {
            _firstItem.bSize = 0;
            FreeHGlobal(ptr);
        }
    }
    else
    {
        var ptr = Interlocked.Exchange(ref _items[largeID].buf, IntPtr.Zero);
        if (ptr != IntPtr.Zero)
        {
            _items[largeID].bSize = 0;
            FreeHGlobal(ptr);
        }
    }
}
/*-------------------------------------------------
 *  PIXEL_OP_REBASE_OPAQUE - render all pixels
 *  regardless of pen, adding 'color' to the
 *  pen value
 *-------------------------------------------------*/
//public static void PIXEL_OP_REBASE_OPAQUE(ref int DEST, int PRIORITY, int SOURCE, int color)
public static void PIXEL_OP_REBASE_OPAQUE(RawBuffer DEST, UInt32 DESTOffset, ListBytes PRIORITY, UInt32 PRIORITYOffset, byte SOURCE, UInt32 color, UInt32 trans_mask, ListPointer<rgb_t> paldata, int PIXEL_TYPE_SIZE)
{
    //PIXEL_OP_(destptrBuf, 0 * PIXEL_TYPE_SIZE, priptrBuf, priptrBufOffset, srcdata[srcptrOffset], PIXEL_TYPE_SIZE);
    //PIXEL_OP_(destptr[0], priptr[0], srcptr[0]);

    //DEST = color + SOURCE;
    if (PIXEL_TYPE_SIZE == 1)
    {
        DEST[DESTOffset] = (byte)(color + SOURCE);
    }
    else if (PIXEL_TYPE_SIZE == 2)
    {
        DEST.set_uint16((int)DESTOffset, (UInt16)(color + SOURCE));
    }
    else if (PIXEL_TYPE_SIZE == 4)
    {
        DEST.set_uint32((int)DESTOffset, color + SOURCE);
    }
    else if (PIXEL_TYPE_SIZE == 8)
    {
        DEST.set_uint64((int)DESTOffset, color + SOURCE);
    }
}
//----------------------------------------------------------------------------------
internal RawBuffer TryAllocate(int bSize)
{
    RawBuffer pBuf = _firstItem;
    short smallest = _nullID;
    short cacheCounter = 0;  // number of non-null buffers in the cache, used to indicate cache usage
    int smallSize = short.MaxValue;

    if (pBuf.buf != IntPtr.Zero)  // _firstItem not null
    {
        if (pBuf.bSize >= bSize)  // first size match
        {
            if (pBuf.buf == Interlocked.CompareExchange(ref _firstItem.buf, IntPtr.Zero, pBuf.buf))
            {
                // _firstItem allocated
                _firstItem.bSize = 0;
                TryClearVote(_firstID);
                return pBuf;
            }
            // else _firstItem was taken by another thread, go on with _items
            cacheCounter++;
        }
        else  // too small to allocate
        {
            smallest = _firstID;
            smallSize = _firstItem.bSize;
            pBuf = RawBuffer.Null;
        }
    }

    if (_items != null)  // buffer container not null
    {
        // first item is null, too small, or has been taken by another thread
        for (short i = 0; i < _items_capacity; i++)
        {
            pBuf = _items[i];
            if (pBuf.buf != IntPtr.Zero)
            {
                // has a candidate here
                if (pBuf.bSize >= bSize)
                {
                    // candidate matches the size
                    if (pBuf.buf == Interlocked.CompareExchange(ref _items[i].buf, IntPtr.Zero, pBuf.buf))
                    {
                        // candidate allocated
                        _items[i].bSize = 0;
                        TryClearVote(i);
                        return pBuf;
                    }
                }
                else if (pBuf.bSize < smallSize)
                {
                    // candidate too small to allocate and even smaller than the previous smallSize
                    smallest = i;
                    smallSize = pBuf.bSize;
                }
                // item not null but cannot be used
                cacheCounter++;
            }
        }
    }

    // no matching buffer found
    if (smallest != _nullID)
    {
        // a smallest buffer was recorded, vote to evict it
        VoteToRemove(smallest, cacheCounter);
    }
    // else the smallest buffer was never set, which means the cache is all null,
    // so there is no need to vote/remove at all

    return RawBuffer.Null;
}
internal abstract void Regather(
    ref TypeBatch typeBatch,
    int constraintStart, int constraintCount, ref int firstSourceIndex,
    ref Buffer<int> indexToHandleCache, ref RawBuffer bodyReferencesCache, ref RawBuffer prestepCache, ref RawBuffer accumulatedImpulsesCache,
    ref Buffer<ConstraintLocation> handlesToConstraints);
internal abstract void CopyToCache(
    ref TypeBatch typeBatch,
    int bundleStart, int localBundleStart, int bundleCount,
    int constraintStart, int localConstraintStart, int constraintCount,
    ref Buffer<int> indexToHandleCache, ref RawBuffer prestepCache, ref RawBuffer accumulatedImpulsesCache);
public void CopyCount(RawBuffer<uint> other, int itemOffset = 0)
{
    ComputeBuffer.CopyCount(Buffer, other.Buffer, itemOffset * other.Buffer.stride);
}
public static unsafe void CreateBinnedResources(BufferPool bufferPool, int maximumSubtreeCount, out RawBuffer buffer, out BinnedResources resources)
{
    //TODO: This is a holdover from the pre-BufferPool tree design. It's pretty ugly. While some preallocation is useful (there's no reason to suffer the overhead of
    //pulling things out of the BufferPool over and over and over again), the degree to which this preallocates has a negative impact on L1 cache for subtree refines.
    int nodeCount = maximumSubtreeCount - 1;
    int bytesRequired =
        16 * (3 + 3 + 1) + sizeof(BoundingBox) * (maximumSubtreeCount + 3 * nodeCount + 3 * MaximumBinCount) +
        16 * (6 + 3 + 8) + sizeof(int) * (maximumSubtreeCount * 6 + nodeCount * 3 + MaximumBinCount * 8) +
        16 * (1) + sizeof(Vector3) * maximumSubtreeCount +
        16 * (1) + sizeof(SubtreeHeapEntry) * maximumSubtreeCount +
        16 * (1) + sizeof(Node) * nodeCount +
        16 * (1) + sizeof(int) * nodeCount;
    bufferPool.Take(bytesRequired, out buffer);

    var memory = buffer.Memory;
    int memoryAllocated = 0;

    resources.BoundingBoxes = (BoundingBox*)Suballocate(memory, ref memoryAllocated, sizeof(BoundingBox) * maximumSubtreeCount);
    resources.LeafCounts = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * maximumSubtreeCount);
    resources.IndexMap = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * maximumSubtreeCount);
    resources.Centroids = (Vector3*)Suballocate(memory, ref memoryAllocated, sizeof(Vector3) * maximumSubtreeCount);
    resources.SubtreeHeapEntries = (SubtreeHeapEntry*)Suballocate(memory, ref memoryAllocated, sizeof(SubtreeHeapEntry) * maximumSubtreeCount);
    resources.StagingNodes = (Node*)Suballocate(memory, ref memoryAllocated, sizeof(Node) * nodeCount);
    resources.RefineFlags = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * nodeCount);

    resources.SubtreeBinIndicesX = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * maximumSubtreeCount);
    resources.SubtreeBinIndicesY = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * maximumSubtreeCount);
    resources.SubtreeBinIndicesZ = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * maximumSubtreeCount);
    resources.TempIndexMap = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * maximumSubtreeCount);

    resources.ALeafCountsX = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * nodeCount);
    resources.ALeafCountsY = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * nodeCount);
    resources.ALeafCountsZ = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * nodeCount);
    resources.AMergedX = (BoundingBox*)Suballocate(memory, ref memoryAllocated, sizeof(BoundingBox) * nodeCount);
    resources.AMergedY = (BoundingBox*)Suballocate(memory, ref memoryAllocated, sizeof(BoundingBox) * nodeCount);
    resources.AMergedZ = (BoundingBox*)Suballocate(memory, ref memoryAllocated, sizeof(BoundingBox) * nodeCount);

    resources.BinBoundingBoxesX = (BoundingBox*)Suballocate(memory, ref memoryAllocated, sizeof(BoundingBox) * MaximumBinCount);
    resources.BinBoundingBoxesY = (BoundingBox*)Suballocate(memory, ref memoryAllocated, sizeof(BoundingBox) * MaximumBinCount);
    resources.BinBoundingBoxesZ = (BoundingBox*)Suballocate(memory, ref memoryAllocated, sizeof(BoundingBox) * MaximumBinCount);
    resources.BinLeafCountsX = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * MaximumBinCount);
    resources.BinLeafCountsY = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * MaximumBinCount);
    resources.BinLeafCountsZ = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * MaximumBinCount);
    resources.BinSubtreeCountsX = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * MaximumBinCount);
    resources.BinSubtreeCountsY = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * MaximumBinCount);
    resources.BinSubtreeCountsZ = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * MaximumBinCount);
    resources.BinStartIndices = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * MaximumBinCount);
    resources.BinSubtreeCountsSecondPass = (int*)Suballocate(memory, ref memoryAllocated, sizeof(int) * MaximumBinCount);

    Debug.Assert(memoryAllocated <= buffer.Length, "The allocated buffer should be large enough for all the suballocations.");
}
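// A hedged sketch of the expected lifetime for the binned resources above, assuming the
// BepuPhysics-era BufferPool API with Take/Return over RawBuffer; the refinement step in the
// middle is a placeholder, not a real entry point.
static void RunBinnedRefineExample()
{
    var pool = new BufferPool();
    CreateBinnedResources(pool, 1024, out var resourcesBuffer, out var binnedResources);
    try
    {
        // ... use binnedResources for binned subtree refinement here ...
    }
    finally
    {
        // every pointer in binnedResources is a suballocation of resourcesBuffer,
        // so returning that single buffer releases all of them at once
        pool.Return(ref resourcesBuffer);
        pool.Clear();
    }
}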
public void Initialize()
{
    pool = new BufferPool();
    buffer = new RawBuffer();
}
// moved to device_sound_interface_samples
// device_sound_interface overrides
//virtual void sound_stream_update(sound_stream &stream, stream_sample_t **inputs, stream_sample_t **outputs, int samples);

// internal helpers

//-------------------------------------------------
//  read_wav_sample - read a WAV file as a sample
//-------------------------------------------------
static bool read_wav_sample(emu_file file, sample_t sample)
{
    // we already read the opening 'RIFF' tag
    uint32_t offset = 4;

    // get the total size
    uint32_t filesize;
    RawBuffer filesizeBuffer = new RawBuffer(4);
    offset += file.read(new ListBytesPointer(filesizeBuffer), 4);
    if (offset < 8)
    {
        osd_printf_warning("Unexpected size offset {0} ({1})\n", offset, file.filename());
        return false;
    }

    filesize = filesizeBuffer.get_uint32();
    filesize = little_endianize_int32(filesize);

    // read the RIFF file type and make sure it's a WAVE file
    RawBuffer buf = new RawBuffer(32);  //char [] buf = new char[32];
    offset += file.read(new ListBytesPointer(buf), 4);
    if (offset < 12)
    {
        osd_printf_warning("Unexpected WAVE offset {0} ({1})\n", offset, file.filename());
        return false;
    }

    if (!(buf[0] == 'W' && buf[1] == 'A' && buf[2] == 'V' && buf[3] == 'E'))  //if (memcmp(&buf[0], "WAVE", 4) != 0)
    {
        osd_printf_warning("Could not find WAVE header ({0})\n", file.filename());
        return false;
    }

    // seek until we find a format tag
    uint32_t length;
    RawBuffer lengthBuffer = new RawBuffer(4);
    while (true)
    {
        offset += file.read(new ListBytesPointer(buf), 4);
        offset += file.read(new ListBytesPointer(lengthBuffer), 4);
        length = lengthBuffer.get_uint32();
        length = little_endianize_int32(length);
        if (buf[0] == 'f' && buf[1] == 'm' && buf[2] == 't' && buf[3] == ' ')  //if (memcmp(&buf[0], "fmt ", 4) == 0)
        {
            break;
        }

        // seek to the next block
        file.seek(length, emu_file.SEEK_CUR);
        offset += length;
        if (offset >= filesize)
        {
            osd_printf_warning("Could not find fmt tag ({0})\n", file.filename());
            return false;
        }
    }

    // read the format -- make sure it is PCM
    uint16_t temp16;
    RawBuffer temp16Buffer = new RawBuffer(2);
    offset += file.read(new ListBytesPointer(temp16Buffer), 2);
    temp16 = temp16Buffer.get_uint16();
    temp16 = little_endianize_int16(temp16);
    if (temp16 != 1)
    {
        osd_printf_warning("unsupported format {0} - only PCM is supported ({1})\n", temp16, file.filename());
        return false;
    }

    // number of channels -- only mono is supported
    offset += file.read(new ListBytesPointer(temp16Buffer), 2);
    temp16 = temp16Buffer.get_uint16();
    temp16 = little_endianize_int16(temp16);
    if (temp16 != 1)
    {
        osd_printf_warning("unsupported number of channels {0} - only mono is supported ({1})\n", temp16, file.filename());
        return false;
    }

    // sample rate
    uint32_t rate;
    RawBuffer rateBuffer = new RawBuffer(4);
    offset += file.read(new ListBytesPointer(rateBuffer), 4);
    rate = rateBuffer.get_uint32();
    rate = little_endianize_int32(rate);

    // bytes/second and block alignment are ignored
    offset += file.read(new ListBytesPointer(buf), 6);

    // bits/sample
    uint16_t bits;
    RawBuffer bitsBuffer = new RawBuffer(2);
    offset += file.read(new ListBytesPointer(bitsBuffer), 2);
    bits = bitsBuffer.get_uint16();
    bits = little_endianize_int16(bits);
    if (bits != 8 && bits != 16)
    {
        osd_printf_warning("unsupported bits/sample {0} - only 8 and 16 are supported ({1})\n", bits, file.filename());
        return false;
    }

    // seek past any extra data
    file.seek(length - 16, emu_file.SEEK_CUR);
    offset += length - 16;

    // seek until we find a data tag
    while (true)
    {
        offset += file.read(new ListBytesPointer(buf), 4);
        offset += file.read(new ListBytesPointer(lengthBuffer), 4);
        length = lengthBuffer.get_uint32();
        length = little_endianize_int32(length);
        if (buf[0] == 'd' && buf[1] == 'a' && buf[2] == 't' && buf[3] == 'a')  //if (memcmp(&buf[0], "data", 4) == 0)
        {
            break;
        }

        // seek to the next block
        file.seek(length, emu_file.SEEK_CUR);
        offset += length;
        if (offset >= filesize)
        {
            osd_printf_warning("Could not find data tag ({0})\n", file.filename());
            return false;
        }
    }

    // if there was a 0 length data block, we're done
    if (length == 0)
    {
        osd_printf_warning("empty data block ({0})\n", file.filename());
        return false;
    }

    // fill in the sample data
    sample.frequency = rate;

    // read the data in
    if (bits == 8)
    {
        sample.data.resize((int)length);
        RawBuffer sample_data_8bit = new RawBuffer(length);
        file.read(new ListBytesPointer(sample_data_8bit), length);

        // convert 8-bit data to signed samples
        ListBytesPointer tempptr = new ListBytesPointer(sample_data_8bit);  //uint8_t *tempptr = reinterpret_cast<uint8_t *>(&sample.data[0]);
        for (int sindex = (int)length - 1; sindex >= 0; sindex--)
        {
            sample.data[sindex] = (Int16)((sbyte)(tempptr[sindex] ^ 0x80) * 256);
        }
    }
    else
    {
        // 16-bit data only needs a byte swap on big-endian systems
        sample.data.resize((int)length / 2);
        RawBuffer sample_data_16bit = new RawBuffer(length);
        file.read(new ListBytesPointer(sample_data_16bit), length);

        // copy the samples; little_endianize_int16 is a no-op on little-endian hosts
        for (UInt32 sindex = 0; sindex < length / 2; sindex++)
        {
            sample.data[sindex] = (Int16)little_endianize_int16(sample_data_16bit.get_uint16((int)sindex));
        }
    }

    return true;
}
public void FreeRaw(ref RawBuffer buf)
{
    FreeRaw(buf);
    buf = RawBuffer.Null;
}
//----------------------------------------------------------------------------------
#region Internal Allocate (buffer)
public RawBuffer AllocateRaw(int byteSize)
{
    if (byteSize < 0)
    {
        throw new ArgumentOutOfRangeException(nameof(byteSize));
    }

    int matchLevel = Array.BinarySearch(_fixedBufferBSize, byteSize);
    if (matchLevel < 0)
    {
        matchLevel = ~matchLevel;
    }

    RawBuffer buf = RawBuffer.Null;
    if (matchLevel >= _fixedSizeLevelCount)
    {
        // request is larger than any fixed-size pool level
        if (_largeBuffers == null)
        {
#if DEBUG
            DebugX.Log($"BufferPool large buffer({byteSize})");
#endif
            buf.buf = AllocHGlobal(byteSize);
            buf.bSize = byteSize;
            return buf;
        }
        return _largeBuffers.AllocateRaw(byteSize);
    }

    // try a few of the nearest fixed-size levels first
    int searchUntil = Min(_fixedSizeLevelCount, matchLevel + BufferSizeLevelSearchCount);
    for (int l = matchLevel; l < searchUntil; l++)
    {
        var fsp = _FixedSizePools[l];
        buf.buf = fsp.TryAlloc();
        if (buf.buf != IntPtr.Zero)
        {
            buf.bSize = fsp._byteSize;
            return buf;
        }
    }

    // fall back to the large-buffer cache
    if (_largeBuffers != null)
    {
        buf = _largeBuffers.TryAllocate(byteSize);
    }
    if (buf.buf != IntPtr.Zero)
    {
        return buf;
    }

    // nothing cached: allocate a fresh fixed-size buffer
    int fSize = _fixedBufferBSize[matchLevel];
#if DEBUG
    DebugX.Log($"BufferPool new buffer({fSize})");
#endif
    buf.buf = AllocHGlobal(fSize);
    buf.bSize = fSize;
    return buf;
}
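// A hedged usage sketch pairing AllocateRaw with the FreeRaw(ref ...) overload shown earlier.
// The "BufferPool" parameter type is an assumption standing in for whatever class exposes these
// members; note the returned buffer may be larger than requested when a pooled fixed-size bucket
// is reused, so rely on bSize rather than the requested size.
static unsafe void UseScratchBuffer(BufferPool pool)
{
    RawBuffer scratch = pool.AllocateRaw(64 * 1024);
    try
    {
        byte* p = (byte*)scratch.buf;
        for (int i = 0; i < scratch.bSize; i++)
        {
            p[i] = 0;  // e.g. zero the scratch space before use
        }
    }
    finally
    {
        // the ref overload frees the memory and nulls the local so it cannot be double-freed
        pool.FreeRaw(ref scratch);
    }
}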
internal abstract void GenerateSortKeysAndCopyReferences(
    ref TypeBatch typeBatch,
    int bundleStart, int localBundleStart, int bundleCount,
    int constraintStart, int localConstraintStart, int constraintCount,
    ref int firstSortKey, ref int firstSourceIndex, ref RawBuffer bodyReferencesCache);