protected override void IndexCore(string partitionName, IEnumerable<TIndexed> items)
{
    var batch = new TableBatchOperation();
    foreach (var item in items)
    {
        batch.Add(TableOperation.InsertOrReplace(ToTableEntity(item)));
    }

    var table = GetCloudTable();
    var options = new TableRequestOptions()
    {
        PayloadFormat = TablePayloadFormat.Json,
        MaximumExecutionTime = _Timeout,
        ServerTimeout = _Timeout,
    };
    var context = new OperationContext();

    Queue<TableBatchOperation> batches = new Queue<TableBatchOperation>();
    batches.Enqueue(batch);

    while (batches.Count > 0)
    {
        batch = batches.Dequeue();
        try
        {
            Stopwatch watch = new Stopwatch();
            watch.Start();
            if (batch.Count > 1)
            {
                table.ExecuteBatchAsync(batch, options, context).GetAwaiter().GetResult();
            }
            else
            {
                if (batch.Count == 1)
                {
                    table.ExecuteAsync(batch[0], options, context).GetAwaiter().GetResult();
                }
            }
            Interlocked.Add(ref _IndexedEntities, batch.Count);
        }
        catch (Exception ex)
        {
            if (IsError413(ex) /* Request too large */ || Helper.IsError(ex, "OperationTimedOut"))
            {
                // Reduce the size of all batches to half the size of the offending batch.
                int maxSize = Math.Max(1, batch.Count / 2);
                bool workDone = false;
                Queue<TableBatchOperation> newBatches = new Queue<TableBatchOperation>();
                for (/* starting with the current batch */; ; batch = batches.Dequeue())
                {
                    for (; batch.Count > maxSize;)
                    {
                        newBatches.Enqueue(ToBatch(batch.Take(maxSize).ToList()));
                        batch = ToBatch(batch.Skip(maxSize).ToList());
                        workDone = true;
                    }
                    if (batch.Count > 0)
                    {
                        newBatches.Enqueue(batch);
                    }
                    if (batches.Count == 0)
                    {
                        break;
                    }
                }
                batches = newBatches;

                // Nothing could be done?
                if (!workDone)
                {
                    throw;
                }
            }
            else if (Helper.IsError(ex, "EntityTooLarge"))
            {
                var op = GetFaultyOperation(ex, batch);
                var entity = (DynamicTableEntity)GetEntity(op);
                var serialized = entity.Serialize();
                Configuration
                    .GetBlocksContainer()
                    .GetBlockBlobReference(entity.GetFatBlobName())
                    .UploadFromByteArrayAsync(serialized, 0, serialized.Length).GetAwaiter().GetResult();
                entity.MakeFat(serialized.Length);
                batches.Enqueue(batch);
            }
            else
            {
                IndexerTrace.ErrorWhileImportingEntitiesToAzure(batch.Select(b => GetEntity(b)).ToArray(), ex);
                batches.Enqueue(batch);
                throw;
            }
        }
    }
}
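The retry logic above relies on an IsError413 helper that is not shown in this excerpt. A minimal sketch of what such a check could look like, assuming it only inspects the HTTP status code carried by a StorageException (as the block-upload method below does for 412); the real implementation may differ:

// Hypothetical sketch: detect a "413 Request Entity Too Large" response.
// Assumes the same StorageException type (Microsoft.WindowsAzure.Storage) used elsewhere in this code.
private static bool IsError413(Exception ex)
{
    var storageEx = ex as StorageException;
    return storageEx != null
        && storageEx.RequestInformation != null
        && storageEx.RequestInformation.HttpStatusCode == 413;
}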
protected override void IndexCore(string partitionName, IEnumerable<TIndexed> items)
{
    var batch = new TableBatchOperation();
    foreach (var item in items)
    {
        batch.Add(TableOperation.InsertOrReplace(ToTableEntity(item)));
    }

    var table = GetCloudTable();
    var options = new TableRequestOptions()
    {
        PayloadFormat = TablePayloadFormat.Json,
        MaximumExecutionTime = _Timeout,
        ServerTimeout = _Timeout,
    };
    var context = new OperationContext();

    Queue<TableBatchOperation> batches = new Queue<TableBatchOperation>();
    batches.Enqueue(batch);

    while (batches.Count > 0)
    {
        batch = batches.Dequeue();
        try
        {
            Stopwatch watch = new Stopwatch();
            watch.Start();
            if (batch.Count > 1)
            {
                table.ExecuteBatchAsync(batch, options, context).GetAwaiter().GetResult();
            }
            else
            {
                if (batch.Count == 1)
                {
                    table.ExecuteAsync(batch[0], options, context).GetAwaiter().GetResult();
                }
            }
            Interlocked.Add(ref _IndexedEntities, batch.Count);
        }
        catch (Exception ex)
        {
            if (IsError413(ex))
            {
                // Request body too large: split the offending batch in two and retry both halves.
                var split = batch.Count / 2;
                var batch1 = batch.Take(split).ToList();
                var batch2 = batch.Skip(split).Take(batch.Count - split).ToList();
                batches.Enqueue(ToBatch(batch1));
                batches.Enqueue(ToBatch(batch2));
            }
            else if (Helper.IsError(ex, "EntityTooLarge"))
            {
                var op = GetFaultyOperation(ex, batch);
                var entity = (DynamicTableEntity)GetEntity(op);
                var serialized = entity.Serialize();
                Configuration
                    .GetBlocksContainer()
                    .GetBlockBlobReference(entity.GetFatBlobName())
                    .UploadFromByteArrayAsync(serialized, 0, serialized.Length).GetAwaiter().GetResult();
                entity.MakeFat(serialized.Length);
                batches.Enqueue(batch);
            }
            else
            {
                IndexerTrace.ErrorWhileImportingEntitiesToAzure(batch.Select(b => GetEntity(b)).ToArray(), ex);
                batches.Enqueue(batch);
                throw;
            }
        }
    }
}
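Both variants above rebuild split batches with a ToBatch helper that is not part of this excerpt. A minimal sketch of what it presumably does, assuming it simply repackages a list of table operations into a new TableBatchOperation:

// Hypothetical sketch: rebuild a TableBatchOperation from individual operations.
private static TableBatchOperation ToBatch(IEnumerable<TableOperation> operations)
{
    var result = new TableBatchOperation();
    foreach (var operation in operations)
    {
        result.Add(operation);
    }
    return result;
}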
protected override void IndexCore(string partitionName, IEnumerable<BlockInfo> blocks)
{
    var first = blocks.First();
    var block = first.Block;
    var hash = first.BlockId.ToString();

    Stopwatch watch = new Stopwatch();
    watch.Start();
    while (true)
    {
        var container = Configuration.GetBlocksContainer();
        var client = container.ServiceClient;
        client.DefaultRequestOptions.SingleBlobUploadThresholdInBytes = 32 * 1024 * 1024;
        var blob = container.GetPageBlobReference(hash);

        MemoryStream ms = new MemoryStream();
        block.ReadWrite(ms, true);
        var blockBytes = ms.GetBuffer();

        // Page blobs must have a length that is a multiple of 512 bytes,
        // so pad the serialized block up to the next 512-byte boundary.
        long length = 512 - (ms.Length % 512);
        if (length == 512)
        {
            length = 0;
        }
        Array.Resize(ref blockBytes, (int)(ms.Length + length));

        try
        {
            blob.UploadFromByteArrayAsync(blockBytes, 0, blockBytes.Length,
                new AccessCondition()
                {
                    // Will throw if the blob already exists, saving one existence check.
                    IfNotModifiedSinceTime = DateTimeOffset.MinValue
                },
                new BlobRequestOptions()
                {
                    MaximumExecutionTime = _Timeout,
                    ServerTimeout = _Timeout
                },
                new OperationContext()).GetAwaiter().GetResult();
            watch.Stop();
            IndexerTrace.BlockUploaded(watch.Elapsed, blockBytes.Length);
            Interlocked.Increment(ref _IndexedBlocks);
            break;
        }
        catch (StorageException ex)
        {
            var alreadyExist = ex.RequestInformation != null && ex.RequestInformation.HttpStatusCode == 412;
            if (!alreadyExist)
            {
                IndexerTrace.ErrorWhileImportingBlockToAzure(uint256.Parse(hash), ex);
                throw;
            }
            watch.Stop();
            IndexerTrace.BlockAlreadyUploaded();
            Interlocked.Increment(ref _IndexedBlocks);
            break;
        }
        catch (Exception ex)
        {
            IndexerTrace.ErrorWhileImportingBlockToAzure(uint256.Parse(hash), ex);
            throw;
        }
    }
}
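The padding arithmetic above exists because Azure page blobs require the blob size to be a multiple of 512 bytes. A small standalone sketch of the same alignment, using a hypothetical AlignTo512 helper name purely for illustration:

// Hypothetical helper showing the same 512-byte alignment used above:
// a 1000-byte serialized block is padded to 1024 bytes, while 1024 stays at 1024.
private static int AlignTo512(long length)
{
    long padding = 512 - (length % 512);
    if (padding == 512)
    {
        padding = 0;
    }
    return (int)(length + padding);
}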