private static ProcessMemoryUsage GetProcessMemoryUsage(MemoryInfoResult memoryInfo)
{
    var workingSetInBytes = memoryInfo.WorkingSet.GetValue(SizeUnit.Bytes);

    // Private memory is approximated as managed heap + unmanaged allocations.
    var privateMemory = MemoryInformation.GetManagedMemoryInBytes() +
                        MemoryInformation.GetUnManagedAllocationsInBytes();

    return new ProcessMemoryUsage(workingSetInBytes, privateMemory);
}
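// A minimal usage sketch (hypothetical caller, not part of this class): assuming
// ProcessMemoryUsage exposes WorkingSetInBytes and PrivateMemoryInBytes, and that
// MemoryInformation.GetMemoryInfo() returns a MemoryInfoResult, a monitoring path
// could compare the two figures:
//
//     var usage = GetProcessMemoryUsage(MemoryInformation.GetMemoryInfo());
//     if (usage.PrivateMemoryInBytes > usage.WorkingSetInBytes)
//     {
//         // More committed private memory than is resident: parts of the
//         // process have likely been paged out.
//     }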
private static byte* ThrowFailedToAllocate(long size, ThreadStats thread, OutOfMemoryException e)
{
    // Sum what every thread has allocated so far, for the diagnostic message.
    long allocated = 0;
    foreach (var threadAllocationsValue in AllThreadStats)
    {
        allocated += threadAllocationsValue.TotalAllocated;
    }

    var managed = MemoryInformation.GetManagedMemoryInBytes();
    var unmanagedMemory = MemoryInformation.GetUnManagedAllocationsInBytes();

    throw new OutOfMemoryException(
        $"Failed to allocate additional {new Size(size, SizeUnit.Bytes)} " +
        $"to already allocated {new Size(thread.TotalAllocated, SizeUnit.Bytes)} by this thread. " +
        $"Total allocated by all threads: {new Size(allocated, SizeUnit.Bytes)}, " +
        $"Managed memory: {new Size(managed, SizeUnit.Bytes)}, " +
        $"Un-managed memory: {new Size(unmanagedMemory, SizeUnit.Bytes)}", e);
}
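// Why does a method that always throws declare a byte* return type? It's the
// throw-helper pattern: the caller can write "return ThrowFailedToAllocate(...)",
// keeping the failure path to a single call so the hot allocation path stays
// small and inlinable. A minimal sketch of a hypothetical call site
// (AllocateCore is assumed, not part of this class):
//
//     byte* Allocate(long size, ThreadStats thread)
//     {
//         try
//         {
//             return AllocateCore(size, thread); // hypothetical allocator
//         }
//         catch (OutOfMemoryException e)
//         {
//             return ThrowFailedToAllocate(size, thread, e); // never actually returns
//         }
//     }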
public bool Execute(DocumentsOperationContext databaseContext, TransactionOperationContext indexContext,
    Lazy<IndexWriteOperation> writeOperation, IndexingStatsScope stats, CancellationToken token)
{
    if (_mapReduceContext.StoreByReduceKeyHash.Count == 0)
    {
        // We need to write the etags here: if everything was filtered out during the
        // map phase, we would otherwise lose the last indexed etag information and
        // end up in an endless indexing loop.
        WriteLastEtags(indexContext);
        return false;
    }

    ReduceResultsSchema.Create(indexContext.Transaction.InnerTransaction, PageNumberToReduceResultTableName, 32);
    var table = indexContext.Transaction.InnerTransaction.OpenTable(ReduceResultsSchema, PageNumberToReduceResultTableName);

    var lowLevelTransaction = indexContext.Transaction.InnerTransaction.LowLevelTransaction;

    var writer = writeOperation.Value;

    var treeScopeStats = stats.For(IndexingOperation.Reduce.TreeScope, start: false);
    var nestedValuesScopeStats = stats.For(IndexingOperation.Reduce.NestedValuesScope, start: false);

    foreach (var store in _mapReduceContext.StoreByReduceKeyHash)
    {
        token.ThrowIfCancellationRequested();

        using (var reduceKeyHash = indexContext.GetLazyString(store.Key.ToString(CultureInfo.InvariantCulture)))
        using (store.Value)
        using (_aggregationBatch)
        {
            var modifiedStore = store.Value;

            switch (modifiedStore.Type)
            {
                case MapResultsStorageType.Tree:
                    using (treeScopeStats.Start())
                    {
                        HandleTreeReduction(indexContext, treeScopeStats, modifiedStore, lowLevelTransaction, writer, reduceKeyHash, table, token);
                    }
                    break;
                case MapResultsStorageType.Nested:
                    using (nestedValuesScopeStats.Start())
                    {
                        HandleNestedValuesReduction(indexContext, nestedValuesScopeStats, modifiedStore, writer, reduceKeyHash, token);
                    }
                    break;
                default:
                    throw new ArgumentOutOfRangeException(modifiedStore.Type.ToString());
            }
        }

        if (_mapReduceContext.FreedPages.Count > 0)
        {
            long tmp = 0;

            using (treeScopeStats.Start())
            using (Slice.External(indexContext.Allocator, (byte*)&tmp, sizeof(long), out Slice pageNumberSlice))
            {
                foreach (var freedPage in _mapReduceContext.FreedPages)
                {
                    // The slice wraps &tmp, so writing the swapped page number into tmp
                    // updates the key that DeleteByKey sees on each iteration.
                    tmp = Bits.SwapBytes(freedPage);
                    table.DeleteByKey(pageNumberSlice);
                }
            }
        }
    }

    if (stats.Duration >= MinReduceDurationToCalculateProcessMemoryUsage)
    {
        var workingSet = MemoryInformation.GetWorkingSetInBytes();
        var privateMemory = MemoryInformation.GetManagedMemoryInBytes() + MemoryInformation.GetUnManagedAllocationsInBytes();

        stats.RecordReduceMemoryStats(workingSet, privateMemory);
    }

    WriteLastEtags(indexContext);
    _mapReduceContext.StoreNextMapResultId();

    return false;
}
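// A note on the Bits.SwapBytes call in the freed-pages loop above: table keys
// compare bytewise, so storing page numbers big-endian makes byte order agree
// with numeric order. A minimal sketch of the conversion (assuming Bits.SwapBytes
// reverses the byte order of a long, as in Sparrow):
//
//     long pageNumber = 0x0102030405060708;
//     long bigEndian = Bits.SwapBytes(pageNumber); // 0x0807060504030201
//     // Written at &tmp on a little-endian machine, the most significant byte
//     // now comes first in memory, so bytewise comparison matches numeric order.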