/// <summary>
/// Initializes a new instance of the <see cref="StorageChunk" /> struct
/// </summary>
/// <param name="id">Chunk id</param>
/// <param name="userData">User data</param>
/// <param name="chunkType">Chunk type</param>
/// <param name="position">Position in the storage</param>
/// <param name="size">Chunk data size</param>
/// <param name="added">Date the chunk was added, in ticks</param>
/// <param name="crc">Crc of the chunk header; when null it is computed from the serialized header</param>
private StorageChunk(uint id, uint userData, int chunkType, long position, uint size, long added, ushort? crc)
{
    // All fields must be definitely assigned before any instance method
    // (ToStorage below) may be called on a struct under construction.
    Id = id;
    UserData = userData;
    Type = chunkType;
    Position = position;
    Size = size;
    AddedTicks = added;
    Added = DateTime.MinValue;
    Changing = false;
    ReadCount = 0;
    Crc = crc ?? (ushort)0;

    if (crc.HasValue)
        return;

    // No CRC supplied: serialize the header (without its trailing 2-byte CRC field)
    // and checksum it. A fixed-size, publicly-visible buffer lets us read the bytes
    // back via GetBuffer(), avoiding the extra byte[] allocation ToArray() would make.
    var headerBytes = new byte[ChunkHeaderSize - 2];
    using (var headerStream = new MemoryStream(headerBytes, 0, headerBytes.Length, true, true))
    using (var headerWriter = new BinaryWriter(headerStream))
    {
        ToStorage(headerWriter, false, true);
        Crc = Crc16.ComputeChecksum(headerStream.GetBuffer());
    }
}
/// <summary>
/// Adds a chunk to the blob
/// </summary>
/// <param name="chunkType">Chunk type</param>
/// <param name="userData">Chunk user data</param>
/// <param name="data">Stream to add; bytes from the current position to the end are stored</param>
/// <param name="token">Cancellation token</param>
/// <returns>StorageChunk of the added chunk</returns>
/// <exception cref="InvalidDataException">Thrown when the remaining stream length exceeds uint.MaxValue</exception>
/// <exception cref="EndOfStreamException">Thrown when the stream ends before the declared size was read; the reserved chunk is reverted to free</exception>
public Task<StorageChunk> AddChunk(int chunkType, uint userData, Stream data, CancellationToken token)
{
    return Task.Factory.StartNew(async () =>
    {
        var l = data.Length - data.Position;
        if (l > uint.MaxValue)
            throw new InvalidDataException("Chunk length greater than uint.MaxValue");

        var size = (uint)l;
        var chunk = default(StorageChunk);

        using (var f = await Open(token))
        {
            long lockSize = 0;
            var ff = f;

            // Phase 1 (under the storage lock): reserve space for the chunk and
            // persist it to the info structure flagged as Changing, so concurrent
            // readers/writers skip it until the data is fully written.
            await Lock(ConcurrencyHandler.Timeout, token, false, () =>
            {
                var info = ReadInfo();
                CheckInitialized(info);

                // Check for exact size free chunk
                var free = info.Chunks.FirstOrDefault(fc => !fc.Changing && fc.Size == size && fc.Type == ChunkTypes.Free);
                if (free.Type != ChunkTypes.Free)
                {
                    // Check for free chunk bigger than required (must also fit the
                    // header + footer of the free chunk left over after the split)
                    free = info.Chunks.FirstOrDefault(fc => !fc.Changing && fc.Size > size + StorageChunk.ChunkHeaderSize + StorageChunk.ChunkFooterSize && fc.Type == ChunkTypes.Free);
                }

                StorageChunk? newFree = null;

                if (free.Type == ChunkTypes.Free)
                {
                    // Free space found in the blob
                    if (free.Size == size)
                    {
                        // Chunk size equals the free space size, replace free space with chunk
                        chunk = new StorageChunk(free.Id, userData, chunkType, free.Position, size, DateTime.UtcNow)
                        {
                            Changing = true
                        };
                        info.ReplaceChunk(free.Id, chunk);
                        lockSize = size + StorageChunk.ChunkHeaderSize + StorageChunk.ChunkFooterSize;
                        Log($"Replacing free chunk {free} with chunk {chunk}");
                    }
                    else
                    {
                        // Chunk size < free space size, carve a chunk-sized portion
                        // off the front of the free space; the remainder stays free
                        newFree = new StorageChunk(free.Id, 0, ChunkTypes.Free,
                            free.Position + size + StorageChunk.ChunkHeaderSize + StorageChunk.ChunkFooterSize,
                            free.Size - size - StorageChunk.ChunkHeaderSize - StorageChunk.ChunkFooterSize,
                            DateTime.UtcNow);
                        info.UpdateChunk(newFree.Value);
                        chunk = new StorageChunk(GetId(info.Chunks), userData, chunkType, free.Position, size, DateTime.UtcNow)
                        {
                            Changing = true
                        };
                        info.AddChunk(chunk);
                        lockSize = free.Size + StorageChunk.ChunkHeaderSize + StorageChunk.ChunkFooterSize;
                        Log($"Split free chunk {free} to chunk {chunk} and free {newFree}");
                    }
                }
                else
                {
                    // No space found, add chunk at the end of the file
                    var last = info.Chunks.OrderByDescending(ch => ch.Position).FirstOrDefault();
                    var position = last.Position == 0
                        ? HeaderSize
                        : last.Position + last.Size + StorageChunk.ChunkHeaderSize + StorageChunk.ChunkFooterSize;
                    chunk = new StorageChunk(GetId(info.Chunks), userData, chunkType, position, size, DateTime.UtcNow)
                    {
                        Changing = true
                    };
                    info.AddChunk(chunk);
                    lockSize = chunk.Size + StorageChunk.ChunkHeaderSize + StorageChunk.ChunkFooterSize;
                    Log($"Add chunk {chunk}");
                }

                using (var fr = ff.Range(chunk.Position, lockSize, LockMode.Exclusive))
                using (var w = new BinaryWriter(fr, Encoding.UTF8))
                {
                    if (fr.Stream.Length < chunk.Position + lockSize)
                        fr.Stream.SetLength(chunk.Position + lockSize);

                    if (newFree.HasValue)
                    {
                        // Write out new free chunk header
                        fr.Position = newFree.Value.Position - chunk.Position;
                        newFree.Value.ToStorage(w);
                        fr.Flush();
                    }

                    // Write chunk header to blob with FREE chunk type so a crash
                    // between here and phase 2 leaves the space reclaimable
                    fr.Position = 0;
                    chunk.ToStorage(w, true);
                    fr.Flush();
                }

                WriteInfo(info, true);
            });

            // Phase 2 (outside the storage lock): stream the payload into the
            // reserved range, then rewrite the header with the real chunk type.
            var ok = false;
            try
            {
                using (var fr = f.Range(chunk.Position, lockSize, LockMode.Exclusive))
                using (var w = new BinaryWriter(fr, Encoding.UTF8))
                {
                    fr.Position = StorageChunk.ChunkHeaderSize;

                    // Write chunk data to the blob, folding the payload into the CRC as we go
                    var buffer = new byte[81920];
                    long remaining = size;
                    ushort crc = 0;
                    while (remaining > 0)
                    {
                        var bytesRead = await data.ReadAsync(buffer, 0, (int)Math.Min(remaining, buffer.Length), token).ConfigureAwait(false);
                        if (bytesRead == 0)
                            break;
                        crc = Crc16.ComputeChecksum(buffer, 0, bytesRead, crc);
                        await fr.WriteAsync(buffer, 0, bytesRead, token).ConfigureAwait(false);
                        remaining -= bytesRead;
                    }

                    // The declared size was captured before the copy; if the source
                    // stream ended early the chunk would be committed with a size
                    // larger than the data actually written (the tail being the
                    // zero-fill from SetLength). Fail instead, so the finally block
                    // below reverts the chunk to free.
                    if (remaining > 0)
                        throw new EndOfStreamException("Source stream ended before the declared chunk size was read");

                    w.Write(crc);
                    fr.Flush();

                    // Write correct chunk type, making the chunk visible as valid data
                    fr.Position = 0;
                    chunk.ToStorage(w);
                    fr.Flush();
                    ok = true;
                }
            }
            finally
            {
                // Always clear the Changing flag, even on failure/cancellation; use a
                // longer timeout and no cancellation so cleanup itself cannot be aborted.
                await Lock(ConcurrencyHandler.Timeout * 4, CancellationToken.None, true, () =>
                {
                    var info = ReadInfo();
                    CheckInitialized(info);

                    // Exception occured, chunk should stay free
                    if (!ok)
                        chunk = info.GetChunkById(chunk.Id);

                    chunk.Changing = false;
                    info.UpdateChunk(chunk);
                    info.AddedVersion++;
                    WriteInfo(info, true);
                });
            }
        }

        return chunk;
    }, token, TaskCreationOptions.DenyChildAttach, Scheduler).Unwrap();
}