/// <summary>
/// Projects one result row into a streamed blob handle.
/// Column 0 is the 20-byte blob identifier; column 1 is the content length.
/// </summary>
public IStreamedBlob Project(SqlCommand cmd, SqlDataReader dr)
{
    BlobID id = (BlobID)dr.GetSqlBinary(0).Value.ToArray(20);
    long length = dr.GetSqlInt64(1).Value;

    // Bind the blob to this projector's repository so its contents can be streamed later.
    return new StreamedBlob(this._blrepo, id, length);
}
/// <summary>
/// Test: persists a set of tree nodes addressed by canonical blob paths,
/// all pointing at the same blob, then asserts no errors were produced.
/// </summary>
internal async Task PersistTreeNodesByBlobPaths()
{
    await createTrees();
    await trrepo.PersistTree(rootId, trees);

    BlobID blid = sblobs[0].Value.ID; // BlobID.TryParse("0123456789012345678901234567890123456789").Value;

    // Fixed fixture paths; every entry references the same blob ID.
    string[] paths =
    {
        "/blob", "/blob2", "/blob3",
        "/content/blob", "/content/blob2", "/content/blob3",
        "/content/images/blob", "/content/images/blob2", "/content/images/blob3",
        "/templates/html", "/templates/page",
        "/pages/sample", "/pages/section1/sample", "/pages/section2/sample2", "/pages/section3/sample3",
        "/newfolder/newsubfolder/sample4"
    };

    var idPaths = new CanonicalBlobIDPath[paths.Length];
    for (int i = 0; i < paths.Length; ++i)
    {
        idPaths[i] = new CanonicalBlobIDPath((CanonicalBlobPath)paths[i], blid);
    }

    var etrnodes = await trrepo.PersistTreeNodesByBlobPaths(rootId, idPaths);
    assertNoErrors(etrnodes);
}
/// <summary>
/// Maps a BlobID to its on-disk location: blobs are fanned out into a
/// subdirectory named by the first two characters of the ID, with the
/// remainder of the ID used as the file name.
/// </summary>
internal FileInfo getPathByID(BlobID id)
{
    DirectoryInfo objDir = getObjectsDirectory();
    string idStr = id.ToString();

    string fanout = idStr.Substring(0, 2);
    string fileName = idStr.Substring(2);

    return new FileInfo(System.IO.Path.Combine(objDir.FullName, fanout, fileName));
}
/// <summary>
/// Resolves the file path for a blob: objects/&lt;first two ID chars&gt;/&lt;rest of ID&gt;.
/// </summary>
internal FileInfo getPathByID(BlobID id)
{
    string idStr = id.ToString();

    // Two-character fan-out directory keeps any single directory from growing too large.
    string path = System.IO.Path.Combine(
        getObjectsDirectory().FullName,
        idStr.Substring(0, 2),
        idStr.Substring(2));

    return new FileInfo(path);
}
/// <summary>
/// Looks up a blob by ID, returning Nothing when the key is absent.
/// </summary>
public Maybe<Blob> MaybeGet(BlobID id)
{
    Blob found;
    return _container.TryGetValue(id, out found)
        ? new Maybe<Blob>(found)
        : Maybe<Blob>.Nothing;
}
/// <summary>
/// Fetches a streamed-blob handle for an existing blob file, or a
/// BlobIDRecordDoesNotExistError when the backing file is missing.
/// </summary>
private Task <Errorable <IStreamedBlob> > getBlob(BlobID id)
{
    var fi = system.getPathByID(id);

    if (fi.Exists)
    {
        // Capture the length now so callers do not need to re-stat the file.
        var blob = new StreamedBlob(this, id, fi.Length);
        return Task.FromResult(new Errorable <IStreamedBlob>((IStreamedBlob)blob));
    }

    return Task.FromResult((Errorable <IStreamedBlob>) new BlobIDRecordDoesNotExistError(id));
}
/// <summary>
/// Looks up a blob by ID, returning Nothing when the key is absent.
/// </summary>
public Maybe <Blob> MaybeGet(BlobID id)
{
    Blob found;
    if (!_container.TryGetValue(id, out found))
    {
        return Maybe <Blob> .Nothing;
    }
    return new Maybe <Blob>(found);
}
/// <summary>
/// SWIG wrapper: clones the native BlobID feature and wraps the returned pointer.
/// </summary>
public override FeatureBase clone()
{
    IntPtr cPtr = libtischPINVOKE.BlobID_clone(swigCPtr);

    // A zero native pointer maps to a null managed wrapper; the 'false'
    // flag presumably means the wrapper does not own the native memory — TODO confirm.
    BlobID result = null;
    if (cPtr != IntPtr.Zero)
    {
        result = new BlobID(cPtr, false);
    }

    // Propagate any exception raised on the native side of the call.
    if (libtischPINVOKE.SWIGPendingException.Pending)
    {
        throw libtischPINVOKE.SWIGPendingException.Retrieve();
    }

    return result;
}
/// <summary>
/// SWIG wrapper: attempts a native-side dynamic cast of a FeatureBase to a BlobID.
/// Returns null when the native cast fails (zero pointer).
/// </summary>
public new static BlobID dynamic_cast(FeatureBase arg0)
{
    IntPtr cPtr = libtischPINVOKE.BlobID_dynamic_cast(FeatureBase.getCPtr(arg0));

    BlobID result = null;
    if (cPtr != IntPtr.Zero)
    {
        result = new BlobID(cPtr, false);
    }

    // Propagate any exception raised on the native side of the call.
    if (libtischPINVOKE.SWIGPendingException.Pending)
    {
        throw libtischPINVOKE.SWIGPendingException.Retrieve();
    }

    return result;
}
/// <summary>
/// Reads the single result row for a path-addressed blob query.
/// Returns BlobNotFoundByPathError when the query produced no row.
/// </summary>
public Errorable <TreePathStreamedBlob> retrieve(SqlCommand cmd, SqlDataReader dr)
{
    if (!dr.Read())
    {
        return new BlobNotFoundByPathError(this._treePath);
    }

    // Column 0: 20-byte blob identifier; column 1: content length.
    BlobID id = (BlobID)dr.GetSqlBinary(0).Value.ToArray(20);
    long length = dr.GetSqlInt64(1).Value;

    var streamed = new StreamedBlob(this._blrepo, id, length);
    return new TreePathStreamedBlob(this._treePath, streamed);
}
/// <summary>
/// Resolves a partial BlobID against the file system: exactly one match
/// yields the full ID, zero matches yields a no-resolution error, and
/// multiple matches yields an ambiguity error listing every candidate.
/// </summary>
public Task <Errorable <BlobID> > ResolvePartialID(BlobID.Partial id)
{
    FileInfo[] matches = system.getPathsByPartialID(id);

    // The two-character fan-out prefix is not part of the file name, so the
    // full ID is reconstructed as prefix + file name.
    string prefix = id.ToString().Substring(0, 2);

    switch (matches.Length)
    {
        case 1:
            return Task.FromResult(BlobID.TryParse(prefix + matches[0].Name));
        case 0:
            return Task.FromResult((Errorable <BlobID>) new BlobIDPartialNoResolutionError(id));
        default:
            return Task.FromResult((Errorable <BlobID>) new BlobIDPartialAmbiguousResolutionError(
                id,
                matches.SelectAsArray(f => BlobID.TryParse(prefix + f.Name).Value)));
    }
}
/// <summary>
/// Binds a streamed-blob handle to its repository. Length may be null when
/// not known up front.
/// </summary>
internal StreamedBlob(StreamedBlobRepository blrepo, BlobID id, long? length = null)
{
    this.blrepo = blrepo;
    this.ID = id;
    this.Length = length;

#if DEBUG
    // Fail fast in debug builds when the backing blob file is missing,
    // rather than deferring the error to the first read.
    if (!blrepo.FileSystem.getPathByID(id).Exists)
    {
        throw new BlobIDRecordDoesNotExistError(id);
    }
#endif
}
// Test: persists one generated blob, then re-reads it through the streaming
// API and recomputes its SHA-1 to exercise the full write/read round trip.
internal async Task StreamedBlobTest()
{
    const int numBlobs = 1;
    PersistingBlob[] blobs = createBlobs(numBlobs);

    // Persist the blobs:
    var streamedBlobs = await blrepo.PersistBlobs(blobs);

    // Load a streamed blob:
    Console.WriteLine("Awaiting fetch of streamed blob.");
    Console.WriteLine("Awaiting ReadStream to complete...");
    await streamedBlobs[0].Value.ReadStreamAsync(async blsr =>
    {
        Console.WriteLine("blob is {0} length", blsr.Length);

        SHA1 sha1 = SHA1.Create();

        // 8040 * 8 * 4 byte buffer — presumably sized around SQL Server's
        // 8040-byte row payload; TODO confirm the rationale.
        const int bufsize = 8040 * 8 * 4;
        byte[] dum = new byte[bufsize];
        int count = bufsize;
        try
        {
            // Hash the blob contents chunk by chunk as they stream in:
            while ((count = blsr.Read(dum, 0, bufsize)) > 0)
            {
                sha1.TransformBlock(dum, 0, count, null, 0);
                //for (int i = 0; i < (count / 40) + ((count % 40) > 0 ? 1 : 0); ++i)
                //{
                //    Console.WriteLine(dum.ToHexString(i * 40, Math.Min(count - (i * 40), 40)));
                //}
            }
            sha1.TransformFinalBlock(dum, 0, 0);

            byte[] hash = sha1.Hash;
            BlobID retrievedID = new BlobID(hash);
            Console.WriteLine("SHA1 is {0}", hash.ToHexString(0, hash.Length));
        }
        catch (Exception ex)
        {
            // Test harness behavior: log and continue rather than failing the await chain.
            Console.WriteLine(ex.ToString());
        }

        return(Errorable.NoErrors);
    });
}
/// <summary>
/// Deletes the blob's backing file; returns the ID on success, or a
/// BlobIDRecordDoesNotExistError if no such file exists.
/// </summary>
private Errorable <BlobID> deleteBlob(BlobID id)
{
    FileInfo path = system.getPathByID(id);

    // Existence check and delete must happen under the same lock to avoid
    // racing other file-system mutations.
    lock (FileSystem.SystemLock)
    {
        if (!path.Exists)
        {
            return new BlobIDRecordDoesNotExistError(id);
        }
        path.Delete();
    }

    return id;
}
/// <summary>
/// Drains the hashing reader to the end of the stream, then verifies the
/// recomputed BlobID matches the expected one (tamper detection).
/// Returns the supplied result on success, or a mismatch error.
/// </summary>
private Errorable validateHash(SHA1StreamReader hasher, Errorable result)
{
    // Read the rest of the stream so the SHA-1 covers every byte:
    byte[] sink = new byte[dummyBufferSize];
    while (hasher.Read(sink, 0, dummyBufferSize) > 0)
    {
        // Discard the bytes; only the side effect of hashing matters.
    }

    BlobID computed = new BlobID(hasher.GetHash());
    if (computed != this.ID)
    {
        return new ComputedBlobIDMismatchError(computed, this.ID);
    }

    return result;
}
/// <summary>
/// Computes the BlobID (SHA-1 digest) of a stream's full contents.
/// </summary>
/// <param name="m">Stream read from its current position to EOF.</param>
/// <returns>A BlobID constructed from the 20-byte SHA-1 hash.</returns>
public static BlobID ComputeID(Stream m)
{
    // SHA1 instances are NOT thread-safe, so create one per call.
    // Fixes: the original never disposed the SHA1 instance, and hand-rolled
    // the TransformBlock/TransformFinalBlock loop that ComputeHash(Stream)
    // already performs internally with its own buffering.
    using (SHA1 sh = SHA1.Create())
    {
        byte[] hash = sh.ComputeHash(m);
        return new BlobID(hash);
    }
}
/// <summary>
/// Protobuf-style hash code: starts at 1 and XORs in the hash of each field
/// that differs from its default value, so default instances hash equally.
/// </summary>
public override int GetHashCode()
{
    int hash = 1;

    if (IsLast != false)
    {
        hash ^= IsLast.GetHashCode();
    }
    if (BlobID != 0)
    {
        hash ^= BlobID.GetHashCode();
    }
    if (FragmentIndex != 0)
    {
        hash ^= FragmentIndex.GetHashCode();
    }
    if (Fragment.Length != 0)
    {
        hash ^= Fragment.GetHashCode();
    }

    return hash;
}
/// <summary>
/// Reads the single result row for a blob-content query and hands the
/// content stream to the configured reader delegate.
/// Returns default(TResult) when no row was produced; throws
/// ComputedBlobIDMismatchError if the row's ID differs from the requested one.
/// </summary>
public TResult Retrieve(SqlCommand cmd, SqlDataReader dr, int expectedCapacity = 10)
{
    if (!dr.Read())
    {
        return default(TResult);
    }

    // Column 0 carries the 20-byte BlobID; it must match the ID we asked for.
    BlobID id = (BlobID)dr.GetSqlBinary(0).Value.ToArray(20);
    if (id != this._id)
    {
        throw new ComputedBlobIDMismatchError(id, this._id);
    }

    // Column 1: total content length; columns 2+ stream the content bytes.
    long datalength = dr.GetSqlInt64(1).Value;
    return this.read(new BlobReaderStream(dr, 2, length: datalength));
}
/// <summary>
/// Deletes a batch of blobs, returning one per-blob outcome per input ID.
/// Work is done synchronously; the Task wrapper exists only to satisfy the
/// async repository interface.
/// </summary>
public Task <Errorable <BlobID>[]> DeleteBlobs(params BlobID[] ids)
{
    if (ids == null)
    {
        throw new ArgumentNullException("ids");
    }
    if (ids.Length == 0)
    {
        return Task.FromResult(new Errorable <BlobID> [0]);
    }

    // Delete each blob synchronously, collecting per-blob results:
    var results = new Errorable <BlobID> [ids.Length];
    for (int i = 0; i < ids.Length; ++i)
    {
        results[i] = deleteBlob(ids[i]);
    }

    // TODO: Run through all the 'objects' directories and prune empty ones.
    // Too eager? Could cause conflicts with other threads.
    return Task.FromResult(results);
}
// Integration test: creates two in-memory blobs, asks the database which of
// them already exist, and persists only the missing ones.
void TestQueryBlobs()
{
    // Create some Blobs:
    Blob bl0 = new Blob.Builder(Encoding.UTF8.GetBytes("Sample README content."));
    Console.WriteLine(bl0.ID.ToString());
    Blob bl1 = new Blob.Builder(Encoding.UTF8.GetBytes("Sample content."));
    Console.WriteLine(bl1.ID.ToString());

    // Container keyed by each blob's content-derived ID:
    var blobs = new ImmutableContainer <BlobID, Blob>(bl => bl.ID, bl0, bl1);

    var db = getDataContext();

    // Check which blobs exist already:
    var qBlobs = db.ExecuteListQueryAsync(new QueryBlobsExist(bl0.ID, bl1.ID), expectedCapacity: blobs.Count);
    // NOTE(review): blocking wait — acceptable in this synchronous test harness.
    qBlobs.Wait();

    // Find the blobs to persist:
    var blobIDsToPersist = blobs.Keys.Except(qBlobs.Result).ToArray();

    // Persist each blob asynchronously:
    Task <Blob>[] persists = new Task <Blob> [blobIDsToPersist.Length];
    for (int i = 0; i < blobIDsToPersist.Length; ++i)
    {
        BlobID id = blobIDsToPersist[i];
        Console.WriteLine("PERSIST {0}", id.ToString());
        persists[i] = db.ExecuteNonQueryAsync(new PersistBlob(blobs[id]));
    }

    Console.WriteLine("Waiting for persists...");
    Task.WaitAll(persists);
    Console.WriteLine("Complete.");
}
// Pairs a canonical (absolute) blob path with the BlobID it points at.
public CanonicalBlobIDPath(CanonicalBlobPath path, BlobID blobID) { this.Path = path; this.BlobID = blobID; }
// True if a blob with the given ID is present in the underlying container.
public bool ContainsKey(BlobID id) { return _container.ContainsKey(id); }
/// <summary>
/// Persists a blob's contents to the file system. The contents are staged
/// into a temporary file (the final name depends on the SHA-1 hash, which is
/// only known once the copy completes), then atomically moved into place.
/// If an identical blob already exists, the temporary file is discarded.
/// </summary>
private async Task<Errorable<IStreamedBlob>> persistBlob(PersistingBlob blob)
{
    Debug.WriteLine(String.Format("Starting persistence of blob"));

    FileInfo tmpPath = system.getTemporaryFile();
    long length = -1;
    BlobID blid;

    // Open a new stream to the source blob contents:
    using (var sr = blob.Stream)
    {
        length = sr.Length;

        // Pre-size the temp file so the async writes below don't have to extend it:
        using (var tmpFi = File.Open(tmpPath.FullName, FileMode.CreateNew, FileAccess.Write, FileShare.None))
        {
            Debug.WriteLine(String.Format("New BLOB temp '{0}' length {1}", tmpPath.FullName, length));
            tmpFi.SetLength(length);
            tmpFi.Close();
        }

        // Buffer size clamped between 8 bytes and largeBufferSize:
        int bufSize = Math.Min(Math.Max((int)length, 8), largeBufferSize);

        // Copy asynchronously, hashing bytes as they pass through the writer:
        using (var fs = new FileStream(tmpPath.FullName, FileMode.Open, FileAccess.Write, FileShare.Read, bufSize, useAsync: true))
        using (var sha1 = new SHA1StreamWriter(fs))
        {
            await sr.CopyToAsync(sha1, bufSize).ConfigureAwait(continueOnCapturedContext: false);

            // The blob's identity is the SHA-1 of its contents:
            blid = new BlobID(sha1.GetHash());
        }
    }

    // Serialize access to the official blob file:
    lock (FileSystem.SystemLock)
    {
        FileInfo path = system.getPathByID(blid);
        path.Refresh();

        // Create the blob's fan-out subdirectory under 'objects' if needed:
        if (!path.Directory.Exists)
        {
            Debug.WriteLine(String.Format("New DIR '{0}'", path.Directory.FullName));
            path.Directory.Create();
        }

        // Don't recreate an existing blob — identical content, identical ID:
        if (path.Exists)
        {
            Debug.WriteLine(String.Format("Blob already exists at path '{0}', deleting temporary...", path.FullName));
            tmpPath.Delete();
            return new Errorable<IStreamedBlob>((IStreamedBlob)new StreamedBlob(this, blid, length));
        }

        // Move the temp file to the final blob filename:
        File.Move(tmpPath.FullName, path.FullName);
    }

    return new Errorable<IStreamedBlob>((IStreamedBlob)new StreamedBlob(this, blid, length));
}
// True if a blob with the given ID is present in the underlying container.
public bool ContainsKey(BlobID id) { return(_container.ContainsKey(id)); }
/// <summary>
/// Fetches a streamed-blob handle for an existing blob file, or a
/// BlobIDRecordDoesNotExistError when the backing file is missing.
/// </summary>
private Task<Errorable<IStreamedBlob>> getBlob(BlobID id)
{
    FileInfo fi = system.getPathByID(id);

    Errorable<IStreamedBlob> outcome;
    if (fi.Exists)
    {
        // Capture the length now so callers do not need to re-stat the file.
        outcome = new Errorable<IStreamedBlob>((IStreamedBlob)new StreamedBlob(this, id, fi.Length));
    }
    else
    {
        outcome = (Errorable<IStreamedBlob>)new BlobIDRecordDoesNotExistError(id);
    }

    return Task.FromResult(outcome);
}
// Public facade over the private getBlob implementation.
public Task<Errorable<IStreamedBlob>> GetBlob(BlobID id) { return getBlob(id); }
// Raised when the SHA-1 recomputed from blob contents differs from the
// expected BlobID — indicates data corruption or tampering.
public ComputedBlobIDMismatchError(BlobID computedID, BlobID expectedID) : base("Computed BlobID {0} does not match expected BlobID {1}", computedID, expectedID) { }
// Returns a streamed-blob handle without checking existence; the length is
// left unspecified (default null) — presumably resolved on first read; TODO confirm.
public Task <Errorable <IStreamedBlob> > GetBlob(BlobID id) { return(Task.FromResult((Errorable <IStreamedBlob>) new StreamedBlob(this, id))); }
// Raised when a partial BlobID matches more than one stored blob; the full
// candidate IDs are carried for diagnostics.
public BlobIDPartialAmbiguousResolutionError(BlobID.Partial id, params BlobID[] ids) : base("Partial BlobID {0} resolves to multiple BlobIDs", id, ids) { }
// Raised when a partial BlobID matches no stored blob at all.
public BlobIDPartialNoResolutionError(BlobID.Partial id) : base("Partial BlobID {0} does not resolve to a BlobID", id) { }
/// <summary>
/// SWIG helper: extracts the native handle from a wrapper, mapping a null
/// wrapper to a zero handle.
/// </summary>
internal static HandleRef getCPtr(BlobID obj)
{
    if (obj == null)
    {
        return new HandleRef(null, IntPtr.Zero);
    }
    return obj.swigCPtr;
}
/// <summary>
/// Persists a blob's contents to the file system: stage into a temp file
/// (the final name is the SHA-1 hash, known only after the copy), then move
/// it into place under the 'objects' directory. An already-existing blob
/// with the same ID causes the temp file to be discarded.
/// </summary>
private async Task <Errorable <IStreamedBlob> > persistBlob(PersistingBlob blob)
{
    Debug.WriteLine(String.Format("Starting persistence of blob"));

    FileInfo tmpPath = system.getTemporaryFile();
    long length = -1;
    BlobID blid;

    // Open a new stream to the source blob contents:
    using (var sr = blob.Stream)
    {
        length = sr.Length;

        // Create the temp file and pre-size it for the async writes below:
        using (var tmpFi = File.Open(tmpPath.FullName, FileMode.CreateNew, FileAccess.Write, FileShare.None))
        {
            Debug.WriteLine(String.Format("New BLOB temp '{0}' length {1}", tmpPath.FullName, length));
            tmpFi.SetLength(length);
            tmpFi.Close();
        }

        // Buffer size clamped between 8 bytes and largeBufferSize:
        int bufSize = Math.Min(Math.Max((int)length, 8), largeBufferSize);

        // Asynchronously copy while hashing the bytes as they pass through:
        using (var fs = new FileStream(tmpPath.FullName, FileMode.Open, FileAccess.Write, FileShare.Read, bufSize, useAsync: true))
        using (var sha1 = new SHA1StreamWriter(fs))
        {
            await sr.CopyToAsync(sha1, bufSize).ConfigureAwait(continueOnCapturedContext: false);

            // The blob's identity is the SHA-1 of its contents:
            blid = new BlobID(sha1.GetHash());
        }
    }

    // Serialize access to the official blob file:
    lock (FileSystem.SystemLock)
    {
        FileInfo path = system.getPathByID(blid);
        path.Refresh();

        // Create the blob's fan-out subdirectory under 'objects' if needed:
        if (!path.Directory.Exists)
        {
            Debug.WriteLine(String.Format("New DIR '{0}'", path.Directory.FullName));
            path.Directory.Create();
        }

        // Don't recreate an existing blob — identical content, identical ID:
        if (path.Exists)
        {
            Debug.WriteLine(String.Format("Blob already exists at path '{0}', deleting temporary...", path.FullName));
            tmpPath.Delete();
            return new Errorable <IStreamedBlob>((IStreamedBlob) new StreamedBlob(this, blid, length));
        }

        // Move the temp file to the final blob filename:
        File.Move(tmpPath.FullName, path.FullName);
    }

    return new Errorable <IStreamedBlob>((IStreamedBlob) new StreamedBlob(this, blid, length));
}
/// <summary>
/// Lists blob files matching a partial ID: the first two characters select
/// the fan-out directory; the remainder is used as a file-name prefix.
/// Returns an empty array when the fan-out directory does not exist.
/// </summary>
internal FileInfo[] getPathsByPartialID(BlobID.Partial partial)
{
    string idStr = partial.ToString();

    string dirPath = System.IO.Path.Combine(
        getObjectsDirectory().FullName,
        idStr.Substring(0, 2));

    var di = new DirectoryInfo(dirPath);
    if (!di.Exists)
    {
        return new FileInfo[0];
    }

    // Prefix match on the remainder of the partial ID:
    return di.GetFiles(idStr.Substring(2) + "*");
}
// Test: persists one generated blob, then re-reads it through the streaming
// API and recomputes its SHA-1 to exercise the full write/read round trip.
internal async Task StreamedBlobTest()
{
    const int numBlobs = 1;
    PersistingBlob[] blobs = createBlobs(numBlobs);

    // Persist the blobs:
    var streamedBlobs = await blrepo.PersistBlobs(blobs);

    // Load a streamed blob:
    Console.WriteLine("Awaiting fetch of streamed blob.");
    Console.WriteLine("Awaiting ReadStream to complete...");
    await streamedBlobs[0].Value.ReadStreamAsync(async blsr =>
    {
        Console.WriteLine("blob is {0} length", blsr.Length);

        SHA1 sha1 = SHA1.Create();

        // 8040 * 8 * 4 byte buffer — presumably sized around SQL Server's
        // 8040-byte row payload; TODO confirm the rationale.
        const int bufsize = 8040 * 8 * 4;
        byte[] dum = new byte[bufsize];
        int count = bufsize;
        try
        {
            // Hash the blob contents chunk by chunk as they stream in:
            while ((count = blsr.Read(dum, 0, bufsize)) > 0)
            {
                sha1.TransformBlock(dum, 0, count, null, 0);
                //for (int i = 0; i < (count / 40) + ((count % 40) > 0 ? 1 : 0); ++i)
                //{
                //    Console.WriteLine(dum.ToHexString(i * 40, Math.Min(count - (i * 40), 40)));
                //}
            }
            sha1.TransformFinalBlock(dum, 0, 0);

            byte[] hash = sha1.Hash;
            BlobID retrievedID = new BlobID(hash);
            Console.WriteLine("SHA1 is {0}", hash.ToHexString(0, hash.Length));
        }
        catch (Exception ex)
        {
            // Test harness behavior: log and continue rather than failing the await chain.
            Console.WriteLine(ex.ToString());
        }

        return Errorable.NoErrors;
    });
}
// Immutable copy-construction from a mutable Builder.
public TreeBlobReference(Builder b) { this.Name = b.Name; this.BlobID = b.BlobID; }
// Public facade over the private getBlob implementation.
public Task <Errorable <IStreamedBlob> > GetBlob(BlobID id) { return(getBlob(id)); }
// Builder copy-construction from an existing immutable TreeBlobReference.
public Builder(TreeBlobReference imm) { this.Name = imm.Name; this.BlobID = imm.BlobID; }
/// <summary>
/// Deletes the blob's backing file; returns the ID on success, or a
/// BlobIDRecordDoesNotExistError if no such file exists.
/// </summary>
private Errorable<BlobID> deleteBlob(BlobID id)
{
    FileInfo path = system.getPathByID(id);

    // Existence check and delete happen under the shared file-system lock
    // to avoid racing concurrent mutations.
    lock (FileSystem.SystemLock)
    {
        if (!path.Exists)
        {
            return new BlobIDRecordDoesNotExistError(id);
        }
        path.Delete();
    }

    return id;
}
// Constructs a builder directly from a name and the referenced BlobID.
public Builder( string pName ,BlobID pBlobID ) { this.Name = pName; this.BlobID = pBlobID; }
// Async facade over the synchronous deleteBlob; the returned Task is already completed.
public Task<Errorable<BlobID>> DeleteBlob(BlobID id) { return Task.FromResult(deleteBlob(id)); }
// Raised when a lookup or delete targets a BlobID that has no stored record.
public BlobIDRecordDoesNotExistError(BlobID blobID) : base("A blob with BlobID {0} does not exist", blobID) { }
/// <summary>
/// Resolves a partial BlobID against the file system: one match yields the
/// full ID, zero matches yields a no-resolution error, and several matches
/// yields an ambiguity error listing all candidates.
/// </summary>
public Task<Errorable<BlobID>> ResolvePartialID(BlobID.Partial id)
{
    FileInfo[] fis = system.getPathsByPartialID(id);

    // The fan-out prefix (first two chars) is not part of the file name, so
    // full IDs are reconstructed as prefix + file name.
    string prefix = id.ToString().Substring(0, 2);

    Errorable<BlobID> outcome;
    if (fis.Length == 1)
    {
        outcome = BlobID.TryParse(prefix + fis[0].Name);
    }
    else if (fis.Length == 0)
    {
        outcome = (Errorable<BlobID>)new BlobIDPartialNoResolutionError(id);
    }
    else
    {
        outcome = (Errorable<BlobID>)new BlobIDPartialAmbiguousResolutionError(
            id,
            fis.SelectAsArray(f => BlobID.TryParse(prefix + f.Name).Value));
    }

    return Task.FromResult(outcome);
}
// Keyed access; delegates directly to the underlying container's indexer.
public Blob this[BlobID id] { get { return(_container[id]); } }
// Query that consumes the blob's content stream via an asynchronous reader delegate.
public QueryStreamedBlob(BlobID id, Func <System.IO.Stream, Task <TResult> > readAsync) { this._id = id; this.readAsync = readAsync; }
// Try-get pattern; delegates directly to the underlying container.
public bool TryGetValue(BlobID id, out Blob value) { return(_container.TryGetValue(id, out value)); }
// Query that consumes the blob's content stream via a synchronous reader delegate.
public QueryStreamedBlob(BlobID id, Func <System.IO.Stream, TResult> read) { this._id = id; this.read = read; }
// Keyed access; delegates directly to the underlying container's indexer.
public Blob this[BlobID id] { get { return _container[id]; } }
// Try-get pattern; delegates directly to the underlying container.
public bool TryGetValue(BlobID id, out Blob value) { return _container.TryGetValue(id, out value); }
// Deletes the blob record from the database by issuing a DestroyBlob non-query.
public Task <Errorable <BlobID> > DeleteBlob(BlobID id) { return(db.ExecuteNonQueryAsync(new DestroyBlob(id))); }
// Query for a single blob; resolved blobs are bound to the given repository for streaming.
public QueryBlob(StreamedBlobRepository blrepo, BlobID id) { this._blrepo = blrepo; this._id = id; }
// Async facade over the synchronous deleteBlob; the returned Task is already completed.
public Task <Errorable <BlobID> > DeleteBlob(BlobID id) { return(Task.FromResult(deleteBlob(id))); }