/// <summary>
/// Replaces a chunk with another one
/// </summary>
/// <param name="id">Chunk id to replace</param>
/// <param name="chunk">Replacement chunk</param>
internal void ReplaceChunk(uint id, StorageChunk chunk)
{
    // Any cached snapshot is stale once the chunk list changes
    Cache = null;

    var slot = ChunkDictionary[id];
    FailIndex(slot);

    ChunkList[slot] = chunk;

    // Re-key the lookup table: the replacement may carry a different id
    ChunkDictionary.Remove(id);
    ChunkDictionary[chunk.Id] = slot;
}
/// <summary>
/// Removes a chunk from the chunk list
/// </summary>
/// <param name="chunk">Chunk to remove</param>
internal void RemoveChunk(StorageChunk chunk)
{
    // Invalidate any cached snapshot before mutating the list
    Cache = null;
    var index = ChunkDictionary[chunk.Id];
    ChunkList.RemoveAt(index);
    // Force recreation of ChunkDictionary: removing an element shifts the index
    // of every subsequent chunk, so all stored indices (not just this id) are stale.
    // NOTE(review): this self-assignment relies on the ChunkList property setter
    // rebuilding ChunkDictionary as a side effect — confirm the setter does that.
    ChunkList = ChunkList;
    //ChunkDictionary.Remove(chunk.Id);
}
/// <summary>
/// Reads the info from a stream
/// </summary>
/// <param name="stream">Stream to read from</param>
/// <param name="cached">Cached storage info, if added and removed version values are the same returns it instead of reading from the stream again</param>
/// <returns>Storage info in the stream</returns>
internal static StorageInfo ReadFromStream(Stream stream, StorageInfo cached)
{
    // NOTE(review): the BinaryReader disposes the underlying stream when this
    // scope exits — callers must not reuse the stream afterwards.
    using (var reader = new BinaryReader(stream, Encoding.UTF8))
    {
        var initialized = reader.ReadBoolean();
        var modifiedVersion = reader.ReadUInt64();

        // Cache hit: the stored version matches the cached snapshot, so hand
        // back a shallow copy of the cached data instead of re-parsing chunks.
        var cacheUsable = cached != null
                          && cached.Initialized == initialized
                          && cached.ModifiedVersion == modifiedVersion;
        if (cacheUsable)
        {
            return new StorageInfo
            {
                Initialized = cached.Initialized,
                AddedVersion = cached.AddedVersion,
                RemovedVersion = cached.RemovedVersion,
                ModifiedVersion = cached.ModifiedVersion,
                _chunkList = cached._chunkList?.ToList(),
                ChunkDictionary = cached.ChunkDictionary?.ToDictionary(kv => kv.Key, kv => kv.Value),
                _stableChunkList = cached._stableChunkList?.ToList(),
                Cache = cached
            };
        }

        // Cache miss: parse the remaining header fields, then every chunk record.
        var addedVersion = reader.ReadUInt64();
        var removedVersion = reader.ReadUInt64();
        var chunkCount = reader.ReadInt32();

        var result = new StorageInfo
        {
            Initialized = initialized,
            ModifiedVersion = modifiedVersion,
            AddedVersion = addedVersion,
            RemovedVersion = removedVersion
        };

        for (var n = 0; n < chunkCount; n++)
        {
            result.AddChunk(StorageChunk.FromStream(reader));
        }

        return result;
    }
}
/// <summary>
/// Adds a new chunk to the chunk list
/// </summary>
/// <param name="chunk">Chunk to add</param>
internal void AddChunk(StorageChunk chunk)
{
    // Mutating the list invalidates any cached snapshot
    Cache = null;

    // New chunks go at the tail; record that index for O(1) id lookup
    ChunkList.Add(chunk);
    var tailIndex = ChunkList.Count - 1;
    ChunkDictionary[chunk.Id] = tailIndex;
}
/// <summary>
/// Updates an existing chunk
/// </summary>
/// <param name="chunk">Chunk to update</param>
internal void UpdateChunk(StorageChunk chunk) => ReplaceChunk(chunk.Id, chunk);
/// <summary>
/// Adds a chunk to the blob
/// </summary>
/// <param name="chunkType">Chunk type</param>
/// <param name="userData">Chunk user data</param>
/// <param name="data">Stream to add</param>
/// <param name="token">Cancellation token</param>
/// <returns>StorageChunk of the added chunk</returns>
public Task<StorageChunk> AddChunk(int chunkType, uint userData, Stream data, CancellationToken token)
{
    return (Task.Factory.StartNew(async () =>
    {
        // The remaining bytes of the input stream become the chunk payload;
        // the on-disk size field is 32-bit, so larger inputs are rejected up front.
        var l = data.Length - data.Position;
        if (l > uint.MaxValue)
        {
            throw new InvalidDataException("Chunk length greater than uint.MaxValue");
        }

        var size = (uint)l;
        var chunk = default(StorageChunk);

        using (var f = await Open(token))
        {
            long lockSize = 0;
            var ff = f;

            // Phase 1 (under the storage lock): choose where the new chunk lives
            // and persist its header with a placeholder (free) type, so a crash
            // mid-write leaves the space reclaimable rather than half-valid.
            await Lock(ConcurrencyHandler.Timeout, token, false, () =>
            {
                var info = ReadInfo();
                CheckInitialized(info);

                // Check for exact size free chunk
                var free = info.Chunks.FirstOrDefault(fc => !fc.Changing && fc.Size == size && fc.Type == ChunkTypes.Free);
                if (free.Type != ChunkTypes.Free)
                {
                    // Check for free chunk bigger than required (it must also fit
                    // the header + footer of the leftover free chunk after a split)
                    free = info.Chunks.FirstOrDefault(fc => !fc.Changing && fc.Size > size + StorageChunk.ChunkHeaderSize + StorageChunk.ChunkFooterSize && fc.Type == ChunkTypes.Free);
                }

                StorageChunk? newFree = null;
                if (free.Type == ChunkTypes.Free)
                {
                    // if free space found in blob
                    if (free.Size == size)
                    {
                        // if chunk size equals with the free space size, replace free space with chunk
                        chunk = new StorageChunk(free.Id, userData, chunkType, free.Position, size, DateTime.UtcNow) { Changing = true };
                        info.ReplaceChunk(free.Id, chunk);
                        lockSize = size + StorageChunk.ChunkHeaderSize + StorageChunk.ChunkFooterSize;
                        Log($"Replacing free chunk {free} with chunk {chunk}");
                    }
                    else
                    {
                        // chunk size < free space size, remove chunk sized portion of the free space;
                        // the leftover tail keeps the old free chunk's id, the new chunk gets a fresh id
                        newFree = new StorageChunk(free.Id, 0, ChunkTypes.Free, free.Position + size + StorageChunk.ChunkHeaderSize + StorageChunk.ChunkFooterSize, free.Size - size - StorageChunk.ChunkHeaderSize - StorageChunk.ChunkFooterSize, DateTime.UtcNow);
                        info.UpdateChunk(newFree.Value);
                        chunk = new StorageChunk(GetId(info.Chunks), userData, chunkType, free.Position, size, DateTime.UtcNow) { Changing = true };
                        info.AddChunk(chunk);
                        lockSize = free.Size + StorageChunk.ChunkHeaderSize + StorageChunk.ChunkFooterSize;
                        Log($"Split free chunk {free} to chunk {chunk} and free {newFree}");
                    }
                }
                else
                {
                    // no space found, add chunk at the end of the file
                    var last = info.Chunks.OrderByDescending(ch => ch.Position).FirstOrDefault();
                    var position = last.Position == 0 ? HeaderSize : last.Position + last.Size + StorageChunk.ChunkHeaderSize + StorageChunk.ChunkFooterSize;
                    chunk = new StorageChunk(GetId(info.Chunks), userData, chunkType, position, size, DateTime.UtcNow) { Changing = true };
                    info.AddChunk(chunk);
                    lockSize = chunk.Size + StorageChunk.ChunkHeaderSize + StorageChunk.ChunkFooterSize;
                    Log($"Add chunk {chunk}");
                }

                using (var fr = ff.Range(chunk.Position, lockSize, LockMode.Exclusive))
                using (var w = new BinaryWriter(fr, Encoding.UTF8))
                {
                    // Grow the file if the reserved region extends past its current end
                    if (fr.Stream.Length < chunk.Position + lockSize)
                    {
                        fr.Stream.SetLength(chunk.Position + lockSize);
                    }

                    if (newFree.HasValue)
                    {
                        // write out new free chunk header
                        fr.Position = newFree.Value.Position - chunk.Position;
                        newFree.Value.ToStorage(w);
                        fr.Flush();
                    }

                    // write chunk data to blob with FREE chunk type
                    fr.Position = 0;
                    chunk.ToStorage(w, true);
                    fr.Flush();
                }

                WriteInfo(info, true);
            });

            // Phase 2 (outside the storage lock): stream the payload into the
            // reserved region, then flip the on-disk type from free to real.
            var ok = false;
            try
            {
                using (var fr = f.Range(chunk.Position, lockSize, LockMode.Exclusive))
                using (var w = new BinaryWriter(fr, Encoding.UTF8))
                {
                    fr.Position = StorageChunk.ChunkHeaderSize;

                    // write chunk data to stream, accumulating a CRC16 over the payload
                    var buffer = new byte[81920];
                    long remaining = size;
                    ushort crc = 0;
                    while (remaining > 0)
                    {
                        var bytesRead = await data.ReadAsync(buffer, 0, (int)Math.Min(remaining, buffer.Length), token).ConfigureAwait(false);
                        crc = Crc16.ComputeChecksum(buffer, 0, bytesRead, crc);
                        if (bytesRead != 0)
                        {
                            await fr.WriteAsync(buffer, 0, bytesRead, token).ConfigureAwait(false);
                        }
                        else
                        {
                            // NOTE(review): source stream ended before `size` bytes were
                            // read; the tail of the reserved region is left unwritten
                            // and the CRC covers only the bytes actually read — confirm
                            // this short-read case is acceptable to readers.
                            break;
                        }
                        remaining -= bytesRead;
                    }

                    // payload CRC goes into the chunk footer
                    w.Write(crc);
                    fr.Flush();

                    // write correct chunk type
                    fr.Position = 0;
                    chunk.ToStorage(w);
                    fr.Flush();
                    ok = true;
                }
            }
            finally
            {
                // Phase 3 (under the lock again, never cancelled): clear the
                // Changing flag. On failure the chunk's on-disk type is still
                // FREE, so re-reading it from the info keeps the space free.
                await Lock(ConcurrencyHandler.Timeout * 4, CancellationToken.None, true, () =>
                {
                    var info = ReadInfo();
                    CheckInitialized(info);

                    // Exception occured, chunk should stay free
                    if (!ok)
                    {
                        chunk = info.GetChunkById(chunk.Id);
                    }

                    chunk.Changing = false;
                    info.UpdateChunk(chunk);
                    info.AddedVersion++;
                    WriteInfo(info, true);
                });
            }
        }

        return chunk;
    }, token, TaskCreationOptions.DenyChildAttach, Scheduler).Unwrap());
}