/// <summary>
/// Uploads the payload's partition-index entry and its data buffer to blob storage.
/// !Note UploadAsync must not report a complete status when the actual uploading
/// is not finished -- otherwise <code>FinishUploading</code> will be called prematurely.
/// </summary>
/// <param name="payload">Chunk to persist; its <c>DataChunkRange</c> names the two target blobs.</param>
/// <returns>A task that completes only after both blob uploads have finished.</returns>
public async Task UploadAsync(IPersistentDataChunk payload)
{
    // BUGFIX: acquire the semaphore OUTSIDE the try block. In the original,
    // a throwing/cancelled WaitAsync still reached the finally and Released
    // a permit that was never acquired, corrupting the semaphore count.
    await m_sem.WaitAsync();
    try
    {
        //TODO make sure everything in IPersistentDataChunk are in range
        var partial_idx = ChunkSerialization.ToString(payload.DataChunkRange);
        Log.WriteLine(LogLevel.Info, $"{nameof(BlobUploader)}: uploading {partial_idx}.");
        var buf = payload.GetBuffer();
        // Upload the index entry and the data blob concurrently; WhenAll ensures
        // we do not log "finished" (or return) until both are done.
        await Task.WhenAll(
            m_dir.GetBlockBlobReference($"{Constants.c_partition_index}_{payload.DataChunkRange.Id}")
                 // TODO(maybe): index_<chunk id> should be _index. Append `parse(chunk)` to the tail of `_index`.
                 .Then(_ => m_helper.UploadTextAsync(_, partial_idx)),
            m_dir.GetBlockBlobReference(payload.DataChunkRange.Id.ToString())
                 .Then(_ => m_helper.UploadDataAsync(_, buf)));
        Log.WriteLine(LogLevel.Info, $"{nameof(BlobUploader)}: finished uploading {partial_idx}.");
    }
    finally
    {
        m_sem.Release();
    }
}
/// <summary>
/// Builds the archipelago: generates the region texture, spawns one region
/// prefab per matching pixel (parented under a new "Archipelago" transform),
/// registers each spawned chunk with ChunkSerialization, then destroys this
/// generator component.
/// </summary>
void Generate()
{
    // Guard: dimensions must both be powers of two, otherwise abort.
    if (!Mathf.IsPowerOfTwo(width) || !Mathf.IsPowerOfTwo(height))
    {
        Debug.LogError("ARCHIPELAGO SIZE IS NOT A POWER OF TWO. --- STOPPING GENERATION ---");
        return;
    }

    archipelagoMap = ArchipelagoTextureGenerator.GenerateTexture(regions, width, height, waterRate);
    archipelago = new GameObject("Archipelago").GetComponent<Transform>();

    // Set up the chunk-streaming singleton before any chunks are registered.
    gameObject.AddComponent<ChunkSerialization>();
    ChunkSerialization.Reset();
    ChunkSerialization.chunkSize = regionSize;
    ChunkSerialization.viewer = player;

    Color[] pixels = archipelagoMap.GetPixels();
    int halfWidth = Mathf.RoundToInt(width / 2);
    int halfHeight = Mathf.RoundToInt(height / 2);

    for (int row = 0; row < height; row++)
    {
        for (int col = 0; col < width; col++)
        {
            Color pixel = pixels[(width * row) + col];
            // Center the grid around the origin, scaled by the region size.
            Vector3 pos = new Vector3(col - halfWidth, 0, row - halfHeight) * regionSize;
            foreach (Region candidate in regions)
            {
                // Compare on the red channel with a small tolerance to absorb
                // texture color quantization.
                if (Mathf.Abs(candidate.color.r - pixel.r) > 0.009f)
                {
                    continue;
                }
                GameObject spawned = Instantiate(candidate.region, pos, Quaternion.identity, archipelago);
                ChunkSerialization.chunks.Add(pos / regionSize, new ChunkSerialization.TerrainChunk(pos, spawned));
            }
        }
    }

    // Generation is one-shot; remove this component from the GameObject.
    Destroy(this);
}
/// <summary>
/// Downloads the blob for a single chunk entirely into memory and wraps the
/// bytes in an <see cref="InMemoryDataChunk"/> bounded by [m_lowKey, m_highKey].
/// </summary>
/// <param name="chunk">The chunk whose Id names the blob to fetch.</param>
/// <returns>The downloaded chunk data held in memory.</returns>
private async Task<InMemoryDataChunk> _Download_impl(Chunk chunk)
{
    var blob = m_dir.GetBlockBlobReference(chunk.Id.ToString());
    var chunk_str = ChunkSerialization.ToString(chunk);
    using (var stream = new MemoryStream())
    {
        Log.WriteLine(LogLevel.Info, $"{nameof(BlobDownloader)}: Version {m_version}: downloading {chunk_str}.");
        await blob.DownloadToStreamAsync(stream);
        byte[] bytes = stream.ToArray();
        Log.WriteLine(LogLevel.Info, $"{nameof(BlobDownloader)}: Version {m_version}: finished {chunk_str}.");
        return new InMemoryDataChunk(chunk, bytes, m_lowKey, m_highKey);
    }
}
/// <summary>
/// Streams the snapshot's chunks into <c>m_buffer</c> in ascending LowKey
/// order, optionally resuming past a previously completed chunk.
/// </summary>
/// <param name="skip">When non-null, chunks whose LowKey &lt;= skip.HighKey are not re-downloaded.</param>
/// <exception cref="SnapshotUploadUnfinishedException">
/// Thrown when the snapshot's "finished" marker blob does not exist, i.e. the upload never completed.
/// </exception>
private async Task _Download(Chunk skip = null)
{
    m_tokenSource = new CancellationTokenSource();
    bool finished = await m_dir.GetBlockBlobReference(Constants.c_finished).ExistsAsync();
    if (!finished)
    {
        throw new SnapshotUploadUnfinishedException();
    }
    m_buffer = new BufferBlock<Task<InMemoryDataChunk>>(new DataflowBlockOptions()
    {
        EnsureOrdered = true,
        CancellationToken = m_tokenSource.Token,
        // Capacity bound is the back-pressure mechanism limiting in-flight downloads.
        BoundedCapacity = DynamicClusterConfig.Instance.ConcurrentDownloads
    });
    Log.WriteLine(LogLevel.Info, $"{nameof(BlobDownloader)}: Begin downloading {m_version} [{m_lowKey}-{m_highKey}].");
    if (skip != null)
    {
        Log.WriteLine(LogLevel.Info, $"{nameof(BlobDownloader)}: Version {m_version}: skipping {ChunkSerialization.ToString(skip)}.");
    }
    var idx = m_dir.GetBlockBlobReference(Constants.c_partition_index);
    var idx_content = await idx.DownloadTextAsync();
    var chunks = idx_content.Split(new[] { '\r', '\n' }, StringSplitOptions.RemoveEmptyEntries)
                            .Select(ChunkSerialization.Parse)
                            .OrderBy(c => c.LowKey)
                            .Where(c => skip == null || c.LowKey > skip.HighKey)
                            .Where(InRange);
    foreach (var chunk in chunks)
    {
        // BUGFIX: the original used Post(), which returns false *immediately*
        // when a bounded block is at capacity -- silently dropping chunks.
        // SendAsync waits for the block to have room, so BoundedCapacity acts
        // as back-pressure instead of data loss. It returns false once the
        // block has completed (e.g. its CancellationToken fired), in which
        // case we stop producing.
        if (!await m_buffer.SendAsync(_Download_impl(chunk)))
        {
            break;
        }
        if (m_tokenSource.IsCancellationRequested)
        {
            break;
        }
    }
}