private static void PrepareBlob(int fileId, int contentSize, int chunkSize,
    out AzureBlobProvider provider, out BlobStorageContext context, out string blobId,
    out byte[] content, string testBlobId = null)
{
    content = Encoding.ASCII.GetBytes(Enumerable.Repeat(0, contentSize)
        .SelectMany(n => "A").ToArray());
        //.SelectMany(n => "0123456789ABCDEF").ToArray());

    provider = new AzureBlobProvider(ConnectionString, chunkSize);
    context = new BlobStorageContext(provider)
    {
        FileId = fileId,
        Length = content.Length,
        PropertyTypeId = 0,
        BlobProviderData = new AzureBlobProviderData
        {
            BlobId = string.IsNullOrEmpty(testBlobId) ? Guid.NewGuid().ToString() : testBlobId
        }
    };

    provider.AllocateAsync(context, CancellationToken.None).GetAwaiter().GetResult();
    blobId = ((AzureBlobProviderData)context.BlobProviderData).BlobId;

    Cleanup(provider, blobId);
}
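// Hedged usage sketch for the PrepareBlob helper above, not part of the original code.
// Assumptions: an MSTest test class, a reachable storage account behind ConnectionString,
// and the Cleanup helper that PrepareBlob already references. The sketch writes the generated
// content through the provider's write stream and reads it back to compare the length.
[TestMethod]
public void AzureBlob_WriteAndReadBack_Sketch()
{
    // fileId 42, 1024 bytes of content, 256-byte chunks (arbitrary test values)
    PrepareBlob(42, 1024, 256, out var provider, out var context, out var blobId, out var content);
    try
    {
        using (var writeStream = provider.GetStreamForWrite(context))
            writeStream.Write(content, 0, content.Length);

        using (var readStream = provider.GetStreamForRead(context))
        using (var copy = new MemoryStream())
        {
            readStream.CopyTo(copy);
            Assert.AreEqual(content.Length, copy.Length);
        }
    }
    finally
    {
        Cleanup(provider, blobId);
    }
}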
public Task DeleteAsync(BlobStorageContext context, CancellationToken cancellationToken)
{
    var data = (InMemoryChunkBlobProviderData)context.BlobProviderData;
    _blobStorage.Remove(data.Id);
    return Task.CompletedTask;
}
public Task<BlobStorageContext> GetBlobStorageContextAsync(int fileId, bool clearStream, int versionId,
    int propertyTypeId, CancellationToken cancellationToken)
{
    var fileDoc = DataProvider.DB.Files.FirstOrDefault(x => x.FileId == fileId);
    if (fileDoc == null)
        // return a completed task instead of a null Task so awaiting callers do not throw
        return STT.Task.FromResult<BlobStorageContext>(null);

    var length = fileDoc.Size;
    var providerName = fileDoc.BlobProvider;
    var providerData = fileDoc.BlobProviderData;

    var provider = BlobStorageBase.GetProvider(providerName);
    var result = new BlobStorageContext(provider, providerData)
    {
        VersionId = versionId,
        PropertyTypeId = propertyTypeId,
        FileId = fileId,
        Length = length,
        BlobProviderData = provider == BlobStorageBase.BuiltInProvider
            ? new BuiltinBlobProviderData()
            : provider.ParseData(providerData)
    };

    return STT.Task.FromResult(result);
}
private static void WriteChunkToSql(BlobStorageContext context, long offset, byte[] buffer)
{
    using (var cmd = GetWriteChunkToSqlProcedure(context, offset, buffer))
    {
        cmd.ExecuteNonQuery();
    }
}
private static async Task WriteChunkToSqlAsync(BlobStorageContext context, long offset, byte[] buffer)
{
    using (var cmd = GetWriteChunkToSqlProcedure(context, offset, buffer))
    {
        await cmd.ExecuteNonQueryAsync();
    }
}
/// <summary>
/// Do not use this method directly from your code.
/// Updates the stream in the appropriate row of the Files table specified by the context.
/// </summary>
public void UpdateStream(BlobStorageContext context, Stream stream)
{
    // We have to work with an integer since SQL does not support
    // binary values bigger than Int32.MaxValue.
    var bufferSize = Convert.ToInt32(stream.Length);

    var buffer = new byte[bufferSize];
    if (bufferSize > 0)
    {
        // Read bytes from the source
        stream.Seek(0, SeekOrigin.Begin);
        stream.Read(buffer, 0, bufferSize);
    }

    //UNDONE: [DIREF] get connection string through constructor
    using (var ctx = new MsSqlDataContext(ConnectionStrings.ConnectionString, DataOptions, CancellationToken.None))
    {
        ctx.ExecuteNonQueryAsync(WriteStreamScript, cmd =>
        {
            cmd.Parameters.AddRange(new[]
            {
                ctx.CreateParameter("@Id", SqlDbType.Int, context.FileId),
                ctx.CreateParameter("@Value", SqlDbType.VarBinary, bufferSize, buffer),
            });
        }).GetAwaiter().GetResult();
    }
}
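// Hedged sketch of how the blob storage layer might invoke UpdateStream above; it is not meant to
// be called from application code. Assumptions (not in the original): "metaDataProvider" is an
// instance of this metadata provider class and a Files row with FileId 123 already exists.
var bytes = Encoding.UTF8.GetBytes("<ContentType name='Sample' />");
var updateContext = new BlobStorageContext(BlobStorageBase.BuiltInProvider)
{
    FileId = 123,
    Length = bytes.Length,
    BlobProviderData = new BuiltinBlobProviderData()
};
using (var source = new MemoryStream(bytes))
    metaDataProvider.UpdateStream(updateContext, source);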
/// <summary>
/// Loads a SqlFileStream object for the binary in the provided context and sets it to the required position.
/// If the filestream is shorter than the required offset, it extends the stream with empty bytes.
/// </summary>
private static SqlFileStream GetAndExtendFileStream(BlobStorageContext context, long offset)
{
    var fsd = ((BuiltinBlobProviderData)context.BlobProviderData).FileStreamData;
    if (fsd == null)
        throw new InvalidOperationException("File row not found. FileId: " + context.FileId);

    var fs = new SqlFileStream(fsd.Path, fsd.TransactionContext, FileAccess.ReadWrite, FileOptions.SequentialScan, 0);

    // if the current stream is smaller than the position where we want to write the bytes
    if (fs.Length < offset)
    {
        // go to the end of the existing stream
        fs.Seek(0, SeekOrigin.End);

        // calculate the size of the gap (warning: fs.Length changes during the write below!)
        var gapSize = offset - fs.Length;

        // fill the gap with empty bytes (one-by-one, because this gap could be huge;
        // a long counter avoids overflow for gaps larger than Int32.MaxValue)
        for (var i = 0L; i < gapSize; i++)
        {
            fs.WriteByte(0x00);
        }
    }
    else if (offset > 0)
    {
        // otherwise we will append to the end or overwrite existing bytes
        fs.Seek(offset, SeekOrigin.Begin);
    }

    return fs;
}
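// Minimal illustration of the same extend-then-write pattern on a plain MemoryStream instead of a
// SqlFileStream, so it runs without SQL Server. The helper name is an assumption for this sketch.
private static void WriteChunkAt(MemoryStream stream, long offset, byte[] chunk)
{
    if (stream.Length < offset)
    {
        // pad the gap between the current end of the stream and the target offset with zero bytes
        stream.Seek(0, SeekOrigin.End);
        var gapSize = offset - stream.Length;
        for (var i = 0L; i < gapSize; i++)
            stream.WriteByte(0x00);
    }
    else
    {
        // the target position already exists: write exactly at the requested offset
        stream.Seek(offset, SeekOrigin.Begin);
    }
    stream.Write(chunk, 0, chunk.Length);
}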
private static void EditFileStream(FileDoc fileRecord, Action<XmlDocument> action)
{
    var ctx = BlobStorage.GetBlobStorageContextAsync(fileRecord.FileId, CancellationToken.None)
        .GetAwaiter().GetResult();
    var blobProvider = ctx.Provider;

    var gcXmlDoc = new XmlDocument();
    using (var xmlReaderStream = blobProvider.GetStreamForRead(ctx))
        gcXmlDoc.Load(xmlReaderStream);

    action(gcXmlDoc);

    var ctdString = gcXmlDoc.OuterXml;
    // choose the provider for the new size and build the new context with that same provider
    var blobProvider2 = BlobStorage.GetProvider(ctdString.Length);
    var ctx2 = new BlobStorageContext(blobProvider2)
    {
        VersionId = ctx.VersionId,
        PropertyTypeId = ctx.PropertyTypeId,
        Length = ctdString.Length
    };
    blobProvider2.AllocateAsync(ctx2, CancellationToken.None).GetAwaiter().GetResult();

    using (var xmlWriterStream = blobProvider2.GetStreamForWrite(ctx2))
    {
        var bytes = Encoding.UTF8.GetBytes(ctdString);
        xmlWriterStream.Write(bytes, 0, bytes.Length);
        xmlWriterStream.Flush();
        fileRecord.Size = xmlWriterStream.Length;
    }

    fileRecord.BlobProvider = blobProvider2.GetType().FullName;
    fileRecord.BlobProviderData = BlobStorageContext.SerializeBlobProviderData(ctx2.BlobProviderData);
}
public async Task ClearAsync(BlobStorageContext context, CancellationToken cancellationToken)
{
    var id = GetData(context).Id;
    DeleteFile(id);
    await CreateFileAsync(id, null, cancellationToken);
}
public Task ClearAsync(BlobStorageContext context, CancellationToken cancellationToken)
{
    var data = (InMemoryChunkBlobProviderData)context.BlobProviderData;
    _blobStorage[data.Id] = new byte[0][];
    return Task.CompletedTask;
}
public async Task<BinaryCacheEntity> LoadBinaryCacheEntityAsync(int versionId, int propertyTypeId,
    SnDataContext dataContext)
{
    if (!(dataContext is MsSqlDataContext sqlCtx))
        throw new PlatformNotSupportedException();

    return await sqlCtx.ExecuteReaderAsync(LoadBinaryCacheEntityScript, cmd =>
    {
        cmd.Parameters.AddRange(new[]
        {
            sqlCtx.CreateParameter("@MaxSize", DbType.Int32, BlobStorageOptions.BinaryCacheSize),
            sqlCtx.CreateParameter("@VersionId", DbType.Int32, versionId),
            sqlCtx.CreateParameter("@PropertyTypeId", DbType.Int32, propertyTypeId),
        });
    }, async (reader, cancel) =>
    {
        cancel.ThrowIfCancellationRequested();
        if (!reader.HasRows || !await reader.ReadAsync(cancel).ConfigureAwait(false))
            return null;

        var length = reader.GetInt64(0);
        var binaryPropertyId = reader.GetInt32(1);
        var fileId = reader.GetInt32(2);
        var providerName = reader.GetSafeString(3);
        var providerTextData = reader.GetSafeString(4);

        byte[] rawData = null;

        var provider = Providers.GetProvider(providerName);
        var context = new BlobStorageContext(provider, providerTextData)
        {
            VersionId = versionId,
            PropertyTypeId = propertyTypeId,
            FileId = fileId,
            Length = length
        };
        if (provider is IBuiltInBlobProvider)
        {
            context.BlobProviderData = new BuiltinBlobProviderData();
            if (!reader.IsDBNull(5))
                rawData = (byte[])reader.GetValue(5);
        }

        return new BinaryCacheEntity
        {
            Length = length,
            RawData = rawData,
            BinaryPropertyId = binaryPropertyId,
            FileId = fileId,
            Context = context
        };
    }).ConfigureAwait(false);
}
public Stream GetStreamForRead(BlobStorageContext context)
{
    SnTrace.Database.Write("AzureBlobProvider.GetStreamForRead: {0}", context.BlobProviderData);
    var providerData = (AzureBlobProviderData)context.BlobProviderData;
    var blob = GetBlob(providerData.BlobId);
    return blob.OpenRead(options: Options);
}
private static async Task WriteChunkToFilestreamAsync(BlobStorageContext context, long offset, byte[] buffer)
{
    using (var fs = GetAndExtendFileStream(context, offset))
    {
        // no offset is needed here, the stream is already at the correct position
        await fs.WriteAsync(buffer, 0, buffer.Length);
    }
}
// proc_BinaryProperty_WriteStream
private const string WriteStreamScript = @"
    UPDATE Files SET Stream = @Value WHERE FileId = @Id;";

/// <summary>
/// Do not use this method directly from your code.
/// Writes the stream in the appropriate row of the Files table specified by the context.
/// </summary>
public void AddStream(BlobStorageContext context, Stream stream)
{
    if (stream == null || stream.Length == 0L)
        return;

    UpdateStream(context, stream);
}
public static Task AddStreamAsync(BlobStorageContext context, Stream stream, MsSqlDataContext dataContext)
{
    if (stream == null || stream.Length == 0L)
        return Task.CompletedTask;

    return UpdateStreamAsync(context, stream, dataContext);
}
public void UpdateBinaryProperty(IBlobProvider blobProvider, BinaryDataValue value)
{
    var streamLength = value.Stream?.Length ?? 0;
    if (streamLength > 0)
    {
        var ctx = new BlobStorageContext(blobProvider, value.BlobProviderData)
        {
            VersionId = 0,
            PropertyTypeId = 0,
            FileId = value.FileId,
            Length = streamLength,
            UseFileStream = false
        };

        blobProvider.Allocate(ctx);
        using (var stream = blobProvider.GetStreamForWrite(ctx))
            value.Stream?.CopyTo(stream);

        value.BlobProviderName = ctx.Provider.GetType().FullName;
        value.BlobProviderData = BlobStorageContext.SerializeBlobProviderData(ctx.BlobProviderData);
    }

    var isRepositoryStream = value.Stream is RepositoryStream || value.Stream is SenseNetSqlFileStream;
    var hasStream = isRepositoryStream || value.Stream is MemoryStream;
    if (!hasStream)
    {
        // do not do any database operation if the stream is not modified
        return;
    }

    var db = _dataProvider.DB;

    var fileId = db.Files.Count == 0 ? 1 : db.Files.Max(r => r.FileId) + 1;
    db.Files.Add(new InMemoryDataProvider.FileRecord
    {
        FileId = fileId,
        ContentType = value.ContentType,
        Extension = value.FileName.Extension,
        FileNameWithoutExtension = value.FileName.FileNameWithoutExtension,
        Size = Math.Max(0, value.Size),
        BlobProvider = value.BlobProviderName,
        BlobProviderData = value.BlobProviderData
    });

    var binaryPropertyRow = db.BinaryProperties.FirstOrDefault(r => r.BinaryPropertyId == value.Id);
    if (binaryPropertyRow != null)
        binaryPropertyRow.FileId = fileId;

    if (fileId > 0 && fileId != value.FileId)
        value.FileId = fileId;
}
public Stream CloneStream(BlobStorageContext context, Stream stream)
{
    if (!(stream is FileSystemChunkReaderStream))
        throw new InvalidOperationException("Stream must be a FileSystemChunkReaderStream in the local disk provider.");

    return GetStreamForRead(context);
}
public async Task WriteAsync(BlobStorageContext context, long offset, byte[] buffer, CancellationToken cancellationToken)
{
    cancellationToken.ThrowIfCancellationRequested();
    using (var stream = GetAndExtendStream(context, offset, buffer.Length))
    {
        await stream.WriteAsync(buffer, 0, buffer.Length, cancellationToken);
    }
}
public Stream CloneStream(BlobStorageContext context, Stream stream)
{
    if (!(stream is FileStream))
        throw new InvalidOperationException("Stream must be a FileStream in the local disk provider.");

    return GetStream(context, FileMode.Open);
}
public Task DeleteAsync(BlobStorageContext context, CancellationToken cancellationToken)
{
    cancellationToken.ThrowIfCancellationRequested();
    var id = GetData(context).Id;
    DeleteFolder(id);
    return Task.CompletedTask;
}
/// <inheritdoc />
public Stream GetStreamForRead(BlobStorageContext context)
{
    if (BlobStorage == null)
        throw new InvalidOperationException("BlobStorage back reference is not set.");

    return new RepositoryStream(context.FileId, context.Length, BlobStorage);
}
/// <summary>
/// Loads a cache item into memory that either contains the raw binary (if its size fits into the limit) or
/// just the blob metadata pointing to the blob storage.
/// </summary>
/// <param name="versionId">Content version id.</param>
/// <param name="propertyTypeId">Binary property type id.</param>
public BinaryCacheEntity LoadBinaryCacheEntity(int versionId, int propertyTypeId)
{
    var commandText = string.Format(LoadBinaryCacheEntityFormatScript, BlobStorage.BinaryCacheSize);

    using (var cmd = new SqlProcedure { CommandText = commandText })
    {
        cmd.Parameters.Add("@VersionId", SqlDbType.Int).Value = versionId;
        cmd.Parameters.Add("@PropertyTypeId", SqlDbType.Int).Value = propertyTypeId;
        cmd.CommandType = CommandType.Text;

        using (var reader = cmd.ExecuteReader(CommandBehavior.SingleRow | CommandBehavior.SingleResult))
        {
            if (!reader.HasRows || !reader.Read())
                return null;

            var length = reader.GetInt64(0);
            var binaryPropertyId = reader.GetInt32(1);
            var fileId = reader.GetInt32(2);
            var providerName = reader.GetSafeString(3);
            var providerTextData = reader.GetSafeString(4);
            var rawData = reader.IsDBNull(5) ? null : (byte[])reader.GetValue(5);

            var provider = BlobStorageBase.GetProvider(providerName);
            var context = new BlobStorageContext(provider, providerTextData)
            {
                VersionId = versionId,
                PropertyTypeId = propertyTypeId,
                FileId = fileId,
                Length = length
            };
            if (provider == BlobStorageBase.BuiltInProvider)
                context.BlobProviderData = new BuiltinBlobProviderData();

            return new BinaryCacheEntity
            {
                Length = length,
                RawData = rawData,
                BinaryPropertyId = binaryPropertyId,
                FileId = fileId,
                Context = context
            };
        }
    }
}
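// Hedged usage sketch for LoadBinaryCacheEntity above. Assumptions (not in the original):
// "metaDataProvider" is an instance of this class, and ProcessBytes / ProcessStream are
// hypothetical callbacks. Small binaries arrive with RawData filled; larger ones only carry
// the blob context, so the caller falls back to a provider read stream.
var entity = metaDataProvider.LoadBinaryCacheEntity(versionId: 7, propertyTypeId: 3);
if (entity != null)
{
    if (entity.RawData != null)
    {
        // the whole binary fit into BlobStorage.BinaryCacheSize
        ProcessBytes(entity.RawData);
    }
    else
    {
        // metadata only: open a read stream through the provider stored on the context
        using (var stream = entity.Context.Provider.GetStreamForRead(entity.Context))
            ProcessStream(stream);
    }
}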
public Stream GetStreamForRead(BlobStorageContext context)
{
    var data = (BuiltinBlobProviderData)context.BlobProviderData;
    if (context.UseFileStream)
        return new SenseNetSqlFileStream(context.Length, context.FileId, data.FileStreamData);

    return new RepositoryStream(context.FileId, context.Length);
}
public Task AllocateAsync(BlobStorageContext context, CancellationToken cancellationToken)
{
    var id = Guid.NewGuid();
    CreateFolder(id);

    context.BlobProviderData = new LocalDiskChunkBlobProviderData { Id = id, ChunkSize = ChunkSizeInBytes };

    return Task.CompletedTask;
}
public Stream GetStreamForWrite(BlobStorageContext context)
{
    SnTrace.Database.Write("AzureBlobProvider.GetStreamForWrite: {0}", context.BlobProviderData);
    var providerData = (AzureBlobProviderData)context.BlobProviderData;
    var blob = GetBlob(providerData.BlobId);
    var stream = blob.OpenWrite(options: Options);
    SetBlobMetadata(blob, context, false);
    return stream;
}
public async Task AllocateAsync(BlobStorageContext context, CancellationToken cancellationToken)
{
    var id = Guid.NewGuid();
    await CreateFileAsync(id, null, cancellationToken);
    context.BlobProviderData = new LocalDiskBlobProviderData { Id = id };
}
public STT.Task InsertBinaryPropertyAsync(IBlobProvider blobProvider, BinaryDataValue value, int versionId,
    int propertyTypeId, bool isNewNode, SnDataContext dataContext)
{
    var streamLength = value.Stream?.Length ?? 0;
    var ctx = new BlobStorageContext(blobProvider)
    {
        VersionId = versionId,
        PropertyTypeId = propertyTypeId,
        FileId = 0,
        Length = streamLength
    };

    // blob operation
    blobProvider.AllocateAsync(ctx, CancellationToken.None).GetAwaiter().GetResult();
    using (var stream = blobProvider.GetStreamForWrite(ctx))
        value.Stream?.CopyTo(stream);

    value.BlobProviderName = ctx.Provider.GetType().FullName;
    value.BlobProviderData = BlobStorageContext.SerializeBlobProviderData(ctx.BlobProviderData);

    // metadata operation
    var db = DataProvider.DB;
    if (!isNewNode)
        DeleteBinaryPropertyAsync(versionId, propertyTypeId, dataContext).GetAwaiter().GetResult();

    var fileId = db.Files.GetNextId();
    db.Files.Insert(new FileDoc
    {
        FileId = fileId,
        ContentType = value.ContentType,
        Extension = value.FileName.Extension,
        FileNameWithoutExtension = value.FileName.FileNameWithoutExtension,
        Size = Math.Max(0, value.Size),
        BlobProvider = value.BlobProviderName,
        BlobProviderData = value.BlobProviderData
    });

    var binaryPropertyId = db.BinaryProperties.GetNextId();
    db.BinaryProperties.Insert(new BinaryPropertyDoc
    {
        BinaryPropertyId = binaryPropertyId,
        FileId = fileId,
        PropertyTypeId = propertyTypeId,
        VersionId = versionId
    });

    value.Id = binaryPropertyId;
    value.FileId = fileId;
    value.Timestamp = 0L; //TODO: file row timestamp

    return STT.Task.CompletedTask;
}
public async Task WriteAsync(BlobStorageContext context, long offset, byte[] buffer)
{
    if (BlobStorageBase.UseFileStream(context.Length))
        await WriteChunkToFilestreamAsync(context, offset, buffer);
    else
        await WriteChunkToSqlAsync(context, offset, buffer);
}
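// Hedged sketch of a caller sitting next to WriteAsync above: it pushes a source stream through
// WriteAsync in fixed-size chunks, and WriteAsync routes each chunk either to the FILESTREAM column
// or to the Files table based on the configured size limit. The helper name and the default chunk
// size are assumptions for this sketch.
public async Task WriteInChunksAsync(BlobStorageContext context, Stream source, int chunkSize = 65536)
{
    var buffer = new byte[chunkSize];
    long offset = 0;
    int read;
    while ((read = await source.ReadAsync(buffer, 0, chunkSize)) > 0)
    {
        // trim the last, possibly partial chunk so only the bytes actually read are written
        var chunk = read == chunkSize ? buffer : buffer.Take(read).ToArray();
        await WriteAsync(context, offset, chunk);
        offset += read;
    }
}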
private static void SetBlobMetadata(CloudBlockBlob blob, BlobStorageContext context, bool commitToAzure = true)
{
    blob.Metadata[FileId] = context.FileId.ToString();
    blob.Metadata[VersionId] = context.VersionId.ToString();
    blob.Metadata[PropertyTypeId] = context.PropertyTypeId.ToString();

    if (commitToAzure)
        blob.SetMetadata(options: Options);
}
public async Task DeleteAsync(BlobStorageContext context, CancellationToken cancellationToken)
{
    using (var op = SnTrace.Database.StartOperation("AzureBlobProvider.Delete: {0}", context.BlobProviderData))
    {
        var providerData = (AzureBlobProviderData)context.BlobProviderData;
        var blob = GetBlob(providerData.BlobId);
        await blob.DeleteAsync(cancellationToken).ConfigureAwait(false);
        op.Successful = true;
    }
}