public static void WriteGraph(IGraph graph)
{
    lock (Locks.GetLock((object)"rdflock"))
    {
        var rdfXmlWriter = new RdfXmlWriter();
        rdfXmlWriter.Save(graph, RdfFilePath);
    }
}
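// All of the snippets below rely on a keyed-lock helper (Locks, BlobLockSet,
// _blobLockSet, _multipleItemsLock, ...) that hands out one lock object per
// key, so work on the same resource is serialized without blocking unrelated
// keys. A minimal sketch of such a helper, assuming only that GetLock(key)
// must return the same object for equal keys, could look like this:

using System.Collections.Concurrent;

public class LockSet
{
    // One lock object per key; GetOrAdd guarantees that concurrent callers
    // asking for the same key receive the same instance.
    private readonly ConcurrentDictionary<object, object> _locks =
        new ConcurrentDictionary<object, object>();

    public object GetLock(object key)
    {
        return _locks.GetOrAdd(key, _ => new object());
    }
}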
public virtual Stream DownloadToStream(Guid blobId)
{
    lock (BlobLockSet.GetLock(blobId.ToString()))
    {
        var file = GetFilePath(blobId);
        if (!Exists(file))
        {
            return null;
        }

        var memoryStream = new MemoryStream(File.ReadAllBytes(file));
        memoryStream.Seek(0, SeekOrigin.Begin);
        return memoryStream;
    }
}
public override Item[] SelectItems(string query)
{
    if (!CacheFastQueryResults || !IsFast(query))
    {
        return _database.SelectItems(query);
    }

    if (!_multipleItems.TryGetValue(query, out var cached))
    {
        // Double-checked locking per query string: only one thread runs the
        // expensive query; later threads re-check the cache after acquiring
        // the lock.
        lock (_multipleItemsLock.GetLock(query))
        {
            if (!_multipleItems.TryGetValue(query, out cached))
            {
                using (new SecurityDisabler())
                {
                    cached = _database.SelectItems(query);
                }

                _multipleItems.TryAdd(query, cached);
            }
        }
    }

    // Results are cached with security disabled, so re-apply the current
    // user's read access before handing items out.
    var results = from item in cached ?? Array.Empty<Item>()
                  where item.Access.CanRead()
                  select new Item(item.ID, item.InnerData, this);

    return results.ToArray();
}
public override Item SelectSingleItem(string query)
{
    if (!CacheFastQueryResults || !IsFast(query))
    {
        return _database.SelectSingleItem(query);
    }

    if (!_singleItems.TryGetValue(query, out var cached))
    {
        // Same double-checked locking pattern as SelectItems, keyed on the
        // query string.
        lock (_singleItemLocks.GetLock(query))
        {
            if (!_singleItems.TryGetValue(query, out cached))
            {
                using (new SecurityDisabler())
                {
                    cached = _database.SelectSingleItem(query);
                }

                _singleItems.TryAdd(query, cached);
            }
        }
    }

    if (cached?.Access.CanRead() == true)
    {
        var copy = new Item(cached.ID, cached.InnerData, this);
        return copy;
    }

    return null;
}
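// The two methods above share a per-key double-checked locking shape: check
// the cache, lock on the key, re-check, compute once, publish. As a
// standalone sketch of the same technique (all names here are hypothetical,
// not from the source):

using System;
using System.Collections.Concurrent;

public class KeyedCache<TKey, TValue>
{
    private readonly ConcurrentDictionary<TKey, TValue> _values = new ConcurrentDictionary<TKey, TValue>();
    private readonly ConcurrentDictionary<TKey, object> _locks = new ConcurrentDictionary<TKey, object>();

    public TValue GetOrCompute(TKey key, Func<TKey, TValue> compute)
    {
        if (_values.TryGetValue(key, out var value))
        {
            return value;
        }

        // Locking per key means only one thread runs the expensive
        // computation for a given key, while other keys proceed unblocked.
        lock (_locks.GetOrAdd(key, _ => new object()))
        {
            if (!_values.TryGetValue(key, out value))
            {
                value = compute(key);
                _values.TryAdd(key, value);
            }

            return value;
        }
    }
}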
public override bool SetBlobStream(Stream stream, Guid blobId, CallContext context)
{
    if (!_configured)
    {
        return base.SetBlobStream(stream, blobId, context);
    }

    lock (_blobLockSet.GetLock(blobId.ToString()))
    {
        _blobManager.UploadFromStream(blobId, stream);

        // Insert an empty reference to the blob id into the SQL Blobs table
        // to assist the cleanup process: during cleanup it is faster to
        // query the database for blobs that should be removed than to
        // retrieve and parse a list from the external source. Any existing
        // references are removed first.
        Api.Execute("DELETE FROM {0}Blobs{1} WHERE {0}BlobId{1} = {2}blobId{3}", "@blobId", blobId);

        const string cmdText =
            "INSERT INTO [Blobs]([Id], [BlobId], [Index], [Created], [Data]) " +
            "VALUES(NewId(), @blobId, @index, @created, @data)";

        using (var connection = new SqlConnection(Api.ConnectionString))
        {
            connection.Open();

            using (var command = new SqlCommand(cmdText, connection) { CommandTimeout = (int)CommandTimeout.TotalSeconds })
            {
                command.Parameters.AddWithValue("@blobId", blobId);
                command.Parameters.AddWithValue("@index", 0);
                command.Parameters.AddWithValue("@created", DateTime.UtcNow);
                command.Parameters.Add("@data", SqlDbType.Image, 0).Value = new byte[0];
                command.ExecuteNonQuery();
            }
        }
    }

    return true;
}
public override bool SetBlobStream(Stream stream, Guid blobId, CallContext context)
{
    lock (_blobSetLocks.GetLock(blobId))
    {
        try
        {
            StorageProvider.Put(stream, blobId.ToString());

            // Insert an empty reference to the blob id into the SQL Blobs
            // table to assist the cleanup process: during cleanup it is
            // faster to query the database for blobs that should be removed
            // than to retrieve and parse a list from Azure.
            const string cmdText =
                "INSERT INTO [Blobs]([Id], [BlobId], [Index], [Created], [Data]) " +
                "VALUES(NewId(), @blobId, @index, @created, @data)";

            using (var connection = new SqlConnection(Api.ConnectionString))
            {
                connection.Open();

                using (var command = new SqlCommand(cmdText, connection) { CommandTimeout = (int)CommandTimeout.TotalSeconds })
                {
                    command.Parameters.AddWithValue("@blobId", blobId);
                    command.Parameters.AddWithValue("@index", 0);
                    command.Parameters.AddWithValue("@created", DateTime.UtcNow);
                    command.Parameters.Add("@data", SqlDbType.Image, 0).Value = new byte[0];
                    command.ExecuteNonQuery();
                }
            }
        }
        catch (StorageException ex)
        {
            Log.Error($"Upload of blob with Id {blobId} failed.", ex, this);
            throw;
        }
    }

    return true;
}
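// Both SetBlobStream variants insert an empty reference row purely so that a
// later cleanup pass can work from SQL instead of listing the external store.
// A hedged sketch of such a pass (the method name, delegates, and the
// "still referenced" check are all hypothetical, not from the source):

using System;
using System.Collections.Generic;
using System.Data.SqlClient;

public static void CleanupOrphanedBlobs(SqlConnection openConnection, Func<Guid, bool> isReferenced, Action<Guid> deleteFromExternalStore)
{
    // Enumerating blob ids from SQL is cheap compared to listing the
    // external store, which is the whole point of the reference rows.
    var orphans = new List<Guid>();

    var select = new SqlCommand("SELECT DISTINCT [BlobId] FROM [Blobs]", openConnection);
    using (var reader = select.ExecuteReader())
    {
        while (reader.Read())
        {
            var blobId = reader.GetGuid(0);
            if (!isReferenced(blobId))
            {
                orphans.Add(blobId);
            }
        }
    }

    foreach (var blobId in orphans)
    {
        deleteFromExternalStore(blobId);

        var delete = new SqlCommand("DELETE FROM [Blobs] WHERE [BlobId] = @blobId", openConnection);
        delete.Parameters.AddWithValue("@blobId", blobId);
        delete.ExecuteNonQuery();
    }
}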
public override bool UploadFromStream(Guid blobId, Stream stream)
{
    lock (BlobLockSet.GetLock(blobId.ToString()))
    {
        try
        {
            Log.Info($"Uploading Blob {blobId:D} to Amazon S3", this);

            // Buffer the source stream so it can be rewound for both the S3
            // upload and the local disk cache.
            var memoryStream = new MemoryStream();
            stream.CopyTo(memoryStream);
            memoryStream.Seek(0, SeekOrigin.Begin);

            using (var client = new AmazonS3Client(Credentials, Region))
            {
                var request = new PutObjectRequest
                {
                    BucketName = BucketName,
                    Key = GetObjectKey(blobId),
                    InputStream = memoryStream,
                    AutoCloseStream = false,
                    AutoResetStreamPosition = true
                };

                var response = client.PutObject(request);
                if (response == null)
                {
                    return false;
                }
            }

            try
            {
                // A failure to populate the disk cache should not fail the
                // upload, so it is logged and swallowed.
                memoryStream.Seek(0, SeekOrigin.Begin);
                base.UploadFromStream(blobId, memoryStream);
            }
            catch (Exception ex)
            {
                Log.Error($"Can't store {blobId} in disk cache", ex, this);
            }

            return true;
        }
        catch (AmazonS3Exception ex)
        {
            Log.Error($"Can't upload {GetObjectKey(blobId)} to {BucketName}", ex, this);
            return false;
        }
    }
}
private void UpdateLinks(Item item, IEnumerable<ItemLink> newLinks, Func<IEnumerable<Tuple<Guid, ItemLink>>> existingLinks)
{
    lock (_locks.GetLock(item.ID))
    {
        Factory.GetRetryer().ExecuteNoResult(() =>
        {
            using (DataProviderTransaction transaction = DataApi.CreateTransaction())
            {
                var batch = new SqlBatchOperation(DataApi);
                var linksToProcess = existingLinks().ToList();

                foreach (var link in FilterItemLinks(newLinks))
                {
                    // Links that already exist are kept: remove them from the
                    // working list so they are not deleted below.
                    var existingLink = linksToProcess.FirstOrDefault(l => ItemLinksAreEqual(l.Item2, link));
                    if (existingLink != null)
                    {
                        linksToProcess.Remove(existingLink);
                        continue;
                    }

                    if (link.SourceItemID.IsNull)
                    {
                        continue;
                    }

                    AddLink(batch, item, link);
                }

                // Whatever remains in the working list no longer exists on
                // the item, so remove it.
                foreach (var existingLinkId in linksToProcess.Select(l => l.Item1))
                {
                    RemoveLink(batch, existingLinkId);
                }

                batch.FlushBatches();
                transaction.Complete();
            }
        });
    }
}
public void RestoreFromBlobManager(Guid blobId)
{
    lock (_blobLockSet.GetLock(blobId))
    {
        var stream = _blobManager.DownloadToStream(blobId);
        if (stream == null)
        {
            return;
        }

        using (stream)
        using (var con = new SqlConnection(_connectionString))
        {
            con.Open();

            // Remove any existing rows for this blob before re-inserting it.
            var cmd = new SqlCommand("DELETE FROM [Blobs] WHERE [BlobId] = @blobId", con);
            cmd.CommandTimeout = CommandTimeout;
            cmd.Parameters.AddWithValue("@blobId", blobId);
            cmd.ExecuteNonQuery();

            var created = DateTime.UtcNow;
            cmd = new SqlCommand("INSERT INTO [Blobs] ([Id], [BlobId], [Index], [Created], [Data]) VALUES(NewId(), @blobId, @index, @created, @data)", con);
            cmd.CommandTimeout = CommandTimeout;
            cmd.Parameters.AddWithValue("@blobId", blobId);
            cmd.Parameters.AddWithValue("@created", created);

            // Declare the per-chunk parameters once; adding them inside the
            // loop would throw on the second iteration because parameter
            // names must be unique within a command.
            var indexParameter = cmd.Parameters.Add("@index", SqlDbType.Int);
            var dataParameter = cmd.Parameters.Add("@data", SqlDbType.Image);

            if (stream.CanSeek)
            {
                stream.Seek(0L, SeekOrigin.Begin);
            }

            // Write the blob in fixed-size chunks, one row per chunk.
            const int chunkSize = 1029120;
            var chunkIndex = 0;
            var buffer = new byte[chunkSize];
            var readLength = stream.Read(buffer, 0, chunkSize);

            while (readLength > 0)
            {
                indexParameter.Value = chunkIndex;

                // Setting Size to the bytes actually read keeps a short final
                // chunk from being padded out to the full buffer length.
                dataParameter.Size = readLength;
                dataParameter.Value = buffer;

                cmd.ExecuteNonQuery();

                readLength = stream.Read(buffer, 0, chunkSize);
                chunkIndex++;
            }
        }
    }
}
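// For completeness: reading a blob written by RestoreFromBlobManager means
// concatenating the [Data] chunks ordered by [Index]. A minimal sketch
// (the method name and open-connection handling are assumptions; the table
// shape matches the insert above):

using System;
using System.Data.SqlClient;
using System.IO;

public static MemoryStream ReadChunkedBlob(SqlConnection openConnection, Guid blobId)
{
    var result = new MemoryStream();

    var cmd = new SqlCommand(
        "SELECT [Data] FROM [Blobs] WHERE [BlobId] = @blobId ORDER BY [Index]", openConnection);
    cmd.Parameters.AddWithValue("@blobId", blobId);

    using (var reader = cmd.ExecuteReader())
    {
        while (reader.Read())
        {
            // Each row holds one chunk; rows come back in write order.
            var chunk = (byte[])reader[0];
            result.Write(chunk, 0, chunk.Length);
        }
    }

    result.Seek(0, SeekOrigin.Begin);
    return result;
}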