/// <summary>
/// Opens a stream for the specified <see cref="ObjectId"/>.
/// </summary>
/// <param name="objectId">The object identifier.</param>
/// <param name="mode">The mode.</param>
/// <param name="access">The access.</param>
/// <param name="share">The share.</param>
/// <returns>A Stream.</returns>
/// <exception cref="System.InvalidOperationException">Read-only object database.</exception>
public Stream OpenStream(ObjectId objectId, VirtualFileMode mode = VirtualFileMode.Open, VirtualFileAccess access = VirtualFileAccess.Read, VirtualFileShare share = VirtualFileShare.Read)
{
    // Pure reads never need the write backend; route them straight to the read side.
    if (access == VirtualFileAccess.Read)
        return OpenStreamForRead(objectId, mode, access, share);

    // Anything beyond read requires a writable backend.
    if (backendWrite == null)
        throw new InvalidOperationException("Read-only object database.");

    // Read and write backends are the same store: open directly on it.
    if (backendRead1 == backendWrite)
        return backendWrite.OpenStream(objectId, mode, access, share);

    // Backends differ: seed the writable backend with the current content
    // (if any) before handing the caller a writable stream positioned at 0.
    using (var source = OpenStreamForRead(objectId, VirtualFileMode.Open, VirtualFileAccess.ReadWrite, VirtualFileShare.ReadWrite))
    {
        var destination = backendWrite.OpenStream(objectId, mode, access, share);
        source?.CopyTo(destination);
        destination.Position = 0;
        return destination;
    }
}
/// <summary>
/// Opens a read stream for the object, probing backends in priority order:
/// bundle backend first, then primary, then secondary read backend.
/// </summary>
/// <param name="objectId">The object identifier.</param>
/// <param name="mode">The file mode.</param>
/// <param name="access">The file access.</param>
/// <param name="share">The file share.</param>
/// <returns>A stream over the object's content.</returns>
/// <exception cref="FileNotFoundException">The object exists in none of the backends.</exception>
private Stream OpenStreamForRead(ObjectId objectId, VirtualFileMode mode, VirtualFileAccess access, VirtualFileShare share)
{
    if (BundleBackend != null && BundleBackend.Exists(objectId))
    {
        return BundleBackend.OpenStream(objectId, mode, access, share);
    }

    if (backendRead1.Exists(objectId))
    {
        return backendRead1.OpenStream(objectId, mode, access, share);
    }

    if (backendRead2 != null && backendRead2.Exists(objectId))
    {
        return backendRead2.OpenStream(objectId, mode, access, share);
    }

    // Fix: the original threw a parameterless FileNotFoundException, which made a
    // missing object undiagnosable; include the object id in the message.
    throw new FileNotFoundException($"Unable to find the object [{objectId}] in the object database backends.");
}
/// <summary>
/// Opens a read stream for the object, probing backends in priority order:
/// bundle backend first, then primary, then secondary read backend.
/// Returns <c>null</c> when the object is not present in any backend
/// (callers handle absence via null-conditional access).
/// </summary>
/// <param name="objectId">The object identifier.</param>
/// <param name="mode">The file mode.</param>
/// <param name="access">The file access.</param>
/// <param name="share">The file share.</param>
/// <returns>A stream over the object's content, or <c>null</c> if not found.</returns>
private Stream OpenStreamForRead(ObjectId objectId, VirtualFileMode mode, VirtualFileAccess access, VirtualFileShare share)
{
    if (bundleBackend?.Exists(objectId) == true)
        return bundleBackend.OpenStream(objectId, mode, access, share);

    if (backendRead1.Exists(objectId))
        return backendRead1.OpenStream(objectId, mode, access, share);

    if (backendRead2?.Exists(objectId) == true)
        return backendRead2.OpenStream(objectId, mode, access, share);

    // Not found anywhere: absence is signaled with null rather than an exception.
    return null;
}
/// <summary>
/// Creates a bundle at <paramref name="bundleUrl"/> containing the given objects, optionally
/// reusing and/or producing incremental side-bundles (named "bundle.&lt;incrementalId&gt;.bundle")
/// so that unchanged content does not have to be rewritten on every build.
/// </summary>
/// <param name="bundleUrl">The URL of the main bundle file to create.</param>
/// <param name="backend">The backend used to read the source object streams.</param>
/// <param name="objectIds">The identifiers of the objects to pack.</param>
/// <param name="disableCompressionIds">Objects whose streams must be stored uncompressed.</param>
/// <param name="indexMap">The asset-name-to-object-id index serialized into the bundle.</param>
/// <param name="dependencies">The bundle dependencies serialized into the bundle.</param>
/// <param name="useIncrementalBundle">If true, reuse existing incremental bundles and create a new one for objects not yet covered.</param>
/// <exception cref="InvalidOperationException">Thrown when <paramref name="objectIds"/> is empty.</exception>
public static void CreateBundle(string bundleUrl, IOdbBackend backend, ObjectId[] objectIds, ISet<ObjectId> disableCompressionIds, Dictionary<string, ObjectId> indexMap, IList<string> dependencies, bool useIncrementalBundle)
{
    if (objectIds.Length == 0)
    {
        throw new InvalidOperationException("Nothing to pack.");
    }

    // objectsToIndex maps each ObjectId to its slot in `objects`, so that entries can be
    // patched in place later (when an object turns out to live in a reused incremental bundle).
    var objectsToIndex = new Dictionary<ObjectId, int>(objectIds.Length);
    var objects = new List<KeyValuePair<ObjectId, ObjectInfo>>();
    for (int i = 0; i < objectIds.Length; ++i)
    {
        objectsToIndex.Add(objectIds[i], objects.Count);
        objects.Add(new KeyValuePair<ObjectId, ObjectInfo>(objectIds[i], new ObjectInfo()));
    }

    var incrementalBundles = new List<ObjectId>();

    // If there is a .bundle, add incremental id before it
    // (bundleExtensionLength is the number of trailing chars to skip when inserting ".<id>").
    var bundleExtensionLength = (bundleUrl.EndsWith(BundleExtension) ? BundleExtension.Length : 0);

    // Early exit if package didn't change (header-check only)
    if (VirtualFileSystem.FileExists(bundleUrl))
    {
        try
        {
            using (var packStream = VirtualFileSystem.OpenStream(bundleUrl, VirtualFileMode.Open, VirtualFileAccess.Read))
            {
                var bundle = ReadBundleDescription(packStream);

                // If package didn't change since last time, early exit!
                // Comparison is order-insensitive: both sides are sorted before comparing.
                if (ArrayExtensions.ArraysEqual(bundle.Dependencies, dependencies)
                    && ArrayExtensions.ArraysEqual(bundle.Assets.OrderBy(x => x.Key).ToList(), indexMap.OrderBy(x => x.Key).ToList())
                    && ArrayExtensions.ArraysEqual(bundle.Objects.Select(x => x.Key).OrderBy(x => x).ToList(), objectIds.OrderBy(x => x).ToList()))
                {
                    // Make sure all incremental bundles exist
                    // Also, if we don't want incremental bundles but we have some (or vice-versa), let's force a regeneration
                    if ((useIncrementalBundle == (bundle.IncrementalBundles.Count > 0))
                        && bundle.IncrementalBundles.Select(x => bundleUrl.Insert(bundleUrl.Length - bundleExtensionLength, "." + x)).All(x =>
                        {
                            if (!VirtualFileSystem.FileExists(x))
                            {
                                return (false);
                            }
                            using (var incrementalStream = VirtualFileSystem.OpenStream(x, VirtualFileMode.Open, VirtualFileAccess.Read))
                                return (ValidateHeader(incrementalStream));
                        }))
                    {
                        return;
                    }
                }
            }

            // Process existing incremental bundles one by one
            // Try to find if there is enough to reuse in each of them
            var filename = VirtualFileSystem.GetFileName(bundleUrl);
            var directory = VirtualFileSystem.GetParentFolder(bundleUrl);
            foreach (var incrementalBundleUrl in VirtualFileSystem.ListFiles(directory, filename.Insert(filename.Length - bundleExtensionLength, ".*"), VirtualSearchOption.TopDirectoryOnly).Result)
            {
                // The incremental id is encoded in the filename right before the extension.
                var incrementalIdString = incrementalBundleUrl.Substring(incrementalBundleUrl.Length - bundleExtensionLength - ObjectId.HashStringLength, ObjectId.HashStringLength);
                ObjectId incrementalId;
                if (!ObjectId.TryParse(incrementalIdString, out incrementalId))
                {
                    continue;
                }

                // If we don't want incremental bundles, delete old ones from previous build
                if (!useIncrementalBundle)
                {
                    VirtualFileSystem.FileDelete(incrementalBundleUrl);
                    continue;
                }

                long sizeNeededItems = 0;
                long sizeTotal = 0;
                BundleDescription incrementalBundle;
                try
                {
                    using (var packStream = VirtualFileSystem.OpenStream(incrementalBundleUrl, VirtualFileMode.Open, VirtualFileAccess.Read))
                    {
                        incrementalBundle = ReadBundleDescription(packStream);
                    }

                    // Compute size of objects (needed ones and everything)
                    foreach (var @object in incrementalBundle.Objects)
                    {
                        var objectCompressedSize = @object.Value.EndOffset - @object.Value.StartOffset;

                        // TODO: Detect object that are stored without ObjectId being content hash: we need to check actual content hash is same in this case
                        if (objectsToIndex.ContainsKey(@object.Key))
                        {
                            sizeNeededItems += objectCompressedSize;
                        }
                        sizeTotal += objectCompressedSize;
                    }

                    // Check if we would reuse at least 50% of the incremental bundle, otherwise let's just get rid of it
                    // NOTE(review): if sizeTotal is 0 this yields NaN, and NaN < 0.5f is false, so an
                    // empty incremental bundle is kept — presumably harmless, but worth confirming.
                    var reuseRatio = (float)((double)sizeNeededItems / (double)sizeTotal);
                    if (reuseRatio < 0.5f)
                    {
                        VirtualFileSystem.FileDelete(incrementalBundleUrl);
                    }
                    else
                    {
                        // We will reuse this incremental bundle
                        // Let's add ObjectId entries
                        foreach (var @object in incrementalBundle.Objects)
                        {
                            int objectIndex;
                            if (objectsToIndex.TryGetValue(@object.Key, out objectIndex))
                            {
                                var objectInfo = @object.Value;
                                // Index is 1-based: 0 means "stored in the main bundle".
                                objectInfo.IncrementalBundleIndex = incrementalBundles.Count + 1;
                                objects[objectIndex] = new KeyValuePair<ObjectId, ObjectInfo>(@object.Key, objectInfo);
                            }
                        }

                        // Add this incremental bundle in the list
                        incrementalBundles.Add(incrementalId);
                    }
                }
                catch (Exception)
                {
                    // Could not read incremental bundle (format changed?)
                    // Let's delete it
                    VirtualFileSystem.FileDelete(incrementalBundleUrl);
                }
            }
        }
        catch (Exception)
        {
            // Could not read previous bundle (format changed?)
            // Let's just mute this error as new bundle will overwrite it anyway
        }
    }

    // Count objects which needs to be saved
    var incrementalObjects = new List<KeyValuePair<ObjectId, ObjectInfo>>();
    if (useIncrementalBundle)
    {
        for (int i = 0; i < objectIds.Length; ++i)
        {
            // Skip if already part of an existing incremental package
            if (objects[i].Value.IncrementalBundleIndex > 0)
            {
                continue;
            }

            incrementalObjects.Add(new KeyValuePair<ObjectId, ObjectInfo>(objects[i].Key, new ObjectInfo()));
        }
    }

    // Create an incremental package
    var newIncrementalId = ObjectId.New();
    var incrementalBundleIndex = incrementalBundles.Count;
    if (useIncrementalBundle && incrementalObjects.Count > 0)
    {
        incrementalBundles.Add(newIncrementalId);
    }

    using (var packStream = VirtualFileSystem.OpenStream(bundleUrl, VirtualFileMode.Create, VirtualFileAccess.Write))
    {
        var header = new Header();
        header.MagicHeader = Header.MagicHeaderValid;

        var packBinaryWriter = new BinarySerializationWriter(packStream);
        packBinaryWriter.Write(header);
        // Write dependencies
        packBinaryWriter.Write(dependencies.ToList());
        // Write incremental bundles
        packBinaryWriter.Write(incrementalBundles.ToList());

        // Save location of object ids
        var packObjectIdPosition = packStream.Position;

        // Write empty object ids (reserve space, will be rewritten later)
        packBinaryWriter.Write(objects);

        // Write index
        packBinaryWriter.Write(indexMap.ToList());

        // The incremental stream is only created when there are new objects to store incrementally.
        using (var incrementalStream = incrementalObjects.Count > 0 ? VirtualFileSystem.OpenStream(bundleUrl.Insert(bundleUrl.Length - bundleExtensionLength, "." + newIncrementalId), VirtualFileMode.Create, VirtualFileAccess.Write) : null)
        {
            var incrementalBinaryWriter = incrementalStream != null ? new BinarySerializationWriter(incrementalStream) : null;
            long incrementalObjectIdPosition = 0;
            if (incrementalStream != null)
            {
                incrementalBinaryWriter.Write(header);
                // Write dependencies
                incrementalBinaryWriter.Write(new List<string>());
                // Write incremental bundles
                incrementalBinaryWriter.Write(new List<ObjectId>());

                // Save location of object ids
                incrementalObjectIdPosition = incrementalStream.Position;

                // Write empty object ids (reserve space, will be rewritten later)
                incrementalBinaryWriter.Write(incrementalObjects);

                // Write index
                incrementalBinaryWriter.Write(new List<KeyValuePair<string, ObjectId>>());
            }

            // New object payloads go into the incremental bundle when one exists, otherwise into the main pack.
            var objectOutputStream = incrementalStream ?? packStream;
            int incrementalObjectIndex = 0;
            for (int i = 0; i < objectIds.Length; ++i)
            {
                // Skip if already part of an existing incremental package
                if (objects[i].Value.IncrementalBundleIndex > 0)
                {
                    continue;
                }

                using (var objectStream = backend.OpenStream(objectIds[i]))
                {
                    // Prepare object info
                    var objectInfo = new ObjectInfo { StartOffset = objectOutputStream.Position, SizeNotCompressed = objectStream.Length };

                    // re-order the file content so that it is not necessary to seek while reading the input stream (header/object/refs -> header/refs/object)
                    var inputStream = objectStream;
                    var originalStreamLength = objectStream.Length;

                    var streamReader = new BinarySerializationReader(inputStream);
                    var chunkHeader = ChunkHeader.Read(streamReader);
                    if (chunkHeader != null)
                    {
                        // create the reordered stream
                        var reorderedStream = new MemoryStream((int)originalStreamLength);

                        // copy the header
                        var streamWriter = new BinarySerializationWriter(reorderedStream);
                        chunkHeader.Write(streamWriter);

                        // copy the references
                        var newOffsetReferences = reorderedStream.Position;
                        inputStream.Position = chunkHeader.OffsetToReferences;
                        inputStream.CopyTo(reorderedStream);

                        // copy the object
                        var newOffsetObject = reorderedStream.Position;
                        inputStream.Position = chunkHeader.OffsetToObject;
                        inputStream.CopyTo(reorderedStream, chunkHeader.OffsetToReferences - chunkHeader.OffsetToObject);

                        // rewrite the chunk header with correct offsets
                        chunkHeader.OffsetToObject = (int)newOffsetObject;
                        chunkHeader.OffsetToReferences = (int)newOffsetReferences;
                        reorderedStream.Position = 0;
                        chunkHeader.Write(streamWriter);

                        // change the input stream to use reordered stream
                        inputStream = reorderedStream;
                        inputStream.Position = 0;
                    }

                    // compress the stream
                    if (!disableCompressionIds.Contains(objectIds[i]))
                    {
                        objectInfo.IsCompressed = true;

                        // NOTE(review): the LZ4Stream is flushed but not disposed — presumably deliberate
                        // so the underlying output stream stays open; confirm against LZ4Stream semantics.
                        var lz4OutputStream = new LZ4Stream(objectOutputStream, CompressionMode.Compress);
                        inputStream.CopyTo(lz4OutputStream);
                        lz4OutputStream.Flush();
                    }
                    // copy the stream "as is"
                    else
                    {
                        // Write stream
                        inputStream.CopyTo(objectOutputStream);
                    }

                    // release the reordered created stream
                    if (chunkHeader != null)
                    {
                        inputStream.Dispose();
                    }

                    // Add updated object info
                    objectInfo.EndOffset = objectOutputStream.Position;
                    // Note: we add 1 because 0 is reserved for self; first incremental bundle starts at 1
                    objectInfo.IncrementalBundleIndex = objectOutputStream == incrementalStream ? incrementalBundleIndex + 1 : 0;
                    objects[i] = new KeyValuePair<ObjectId, ObjectInfo>(objectIds[i], objectInfo);

                    if (useIncrementalBundle)
                    {
                        // Also update incremental bundle object info
                        objectInfo.IncrementalBundleIndex = 0; // stored in same bundle
                        incrementalObjects[incrementalObjectIndex++] = new KeyValuePair<ObjectId, ObjectInfo>(objectIds[i], objectInfo);
                    }
                }
            }

            // First finish to write incremental package so that main one can't be valid on the HDD without the incremental one being too
            if (incrementalStream != null)
            {
                // Rewrite headers
                header.Size = incrementalStream.Length;
                incrementalStream.Position = 0;
                incrementalBinaryWriter.Write(header);

                // Rewrite object with updated offsets/size
                incrementalStream.Position = incrementalObjectIdPosition;
                incrementalBinaryWriter.Write(incrementalObjects);
            }
        }

        // Rewrite headers
        header.Size = packStream.Length;
        packStream.Position = 0;
        packBinaryWriter.Write(header);

        // Rewrite object with updated offsets/size
        packStream.Position = packObjectIdPosition;
        packBinaryWriter.Write(objects);
    }
}
/// <summary>
/// Creates a bundle at <paramref name="vfsUrl"/> containing the given objects
/// (non-incremental variant: everything is written into a single bundle file).
/// </summary>
/// <param name="vfsUrl">The VFS URL of the bundle file to create.</param>
/// <param name="backend">The backend used to read the source object streams.</param>
/// <param name="objectIds">The identifiers of the objects to pack.</param>
/// <param name="disableCompressionIds">Objects whose streams must be stored uncompressed.</param>
/// <param name="indexMap">The asset-name-to-object-id index serialized into the bundle.</param>
/// <param name="dependencies">The bundle dependencies serialized into the bundle.</param>
/// <exception cref="InvalidOperationException">Thrown when <paramref name="objectIds"/> is empty.</exception>
public static void CreateBundle(string vfsUrl, IOdbBackend backend, ObjectId[] objectIds, ISet<ObjectId> disableCompressionIds, Dictionary<string, ObjectId> indexMap, IList<string> dependencies)
{
    if (objectIds.Length == 0)
    {
        throw new InvalidOperationException("Nothing to pack.");
    }

    // Early exit if package didn't change (header-check only)
    if (VirtualFileSystem.FileExists(vfsUrl))
    {
        try
        {
            using (var packStream = VirtualFileSystem.OpenStream(vfsUrl, VirtualFileMode.Open, VirtualFileAccess.Read))
            {
                var bundle = ReadBundleDescription(packStream);

                // If package didn't change since last time, early exit!
                // Comparison is order-insensitive: both sides are sorted before comparing.
                if (ArrayExtensions.ArraysEqual(bundle.Dependencies, dependencies)
                    && ArrayExtensions.ArraysEqual(bundle.Assets.OrderBy(x => x.Key).ToList(), indexMap.OrderBy(x => x.Key).ToList())
                    && ArrayExtensions.ArraysEqual(bundle.Objects.Select(x => x.Key).OrderBy(x => x).ToList(), objectIds.OrderBy(x => x).ToList()))
                {
                    return;
                }
            }
        }
        catch (Exception)
        {
            // Could not read previous bundle (format changed?)
            // Let's just mute this error as new bundle will overwrite it anyway
        }
    }

    using (var packStream = VirtualFileSystem.OpenStream(vfsUrl, VirtualFileMode.Create, VirtualFileAccess.Write))
    {
        var header = new Header();
        header.MagicHeader = Header.MagicHeaderValid;

        var binaryWriter = new BinarySerializationWriter(packStream);
        binaryWriter.Write(header);
        // Write dependencies
        binaryWriter.Write(dependencies.ToList());

        // Save location of object ids
        var objectIdPosition = packStream.Position;

        // Write empty object ids (reserve space, will be rewritten later)
        var objects = new List<KeyValuePair<ObjectId, ObjectInfo>>();
        for (int i = 0; i < objectIds.Length; ++i)
        {
            objects.Add(new KeyValuePair<ObjectId, ObjectInfo>(objectIds[i], new ObjectInfo()));
        }
        binaryWriter.Write(objects);
        // Reuse the list to collect the real entries (with offsets) as payloads are written.
        objects.Clear();

        // Write index
        binaryWriter.Write(indexMap.ToList());

        for (int i = 0; i < objectIds.Length; ++i)
        {
            using (var objectStream = backend.OpenStream(objectIds[i]))
            {
                // Prepare object info
                var objectInfo = new ObjectInfo { StartOffset = packStream.Position, SizeNotCompressed = objectStream.Length };

                // re-order the file content so that it is not necessary to seek while reading the input stream (header/object/refs -> header/refs/object)
                var inputStream = objectStream;
                var originalStreamLength = objectStream.Length;

                var streamReader = new BinarySerializationReader(inputStream);
                var chunkHeader = ChunkHeader.Read(streamReader);
                if (chunkHeader != null)
                {
                    // create the reordered stream
                    var reorderedStream = new MemoryStream((int)originalStreamLength);

                    // copy the header
                    var streamWriter = new BinarySerializationWriter(reorderedStream);
                    chunkHeader.Write(streamWriter);

                    // copy the references
                    var newOffsetReferences = reorderedStream.Position;
                    inputStream.Position = chunkHeader.OffsetToReferences;
                    inputStream.CopyTo(reorderedStream);

                    // copy the object
                    var newOffsetObject = reorderedStream.Position;
                    inputStream.Position = chunkHeader.OffsetToObject;
                    inputStream.CopyTo(reorderedStream, chunkHeader.OffsetToReferences - chunkHeader.OffsetToObject);

                    // rewrite the chunk header with correct offsets
                    chunkHeader.OffsetToObject = (int)newOffsetObject;
                    chunkHeader.OffsetToReferences = (int)newOffsetReferences;
                    reorderedStream.Position = 0;
                    chunkHeader.Write(streamWriter);

                    // change the input stream to use reordered stream
                    inputStream = reorderedStream;
                    inputStream.Position = 0;
                }

                // compress the stream
                if (!disableCompressionIds.Contains(objectIds[i]))
                {
                    objectInfo.IsCompressed = true;

                    // NOTE(review): the LZ4Stream is flushed but not disposed — presumably deliberate
                    // so packStream stays open; confirm against LZ4Stream semantics.
                    var lz4OutputStream = new LZ4Stream(packStream, CompressionMode.Compress);
                    inputStream.CopyTo(lz4OutputStream);
                    lz4OutputStream.Flush();
                }
                else // copy the stream "as is"
                {
                    // Write stream
                    inputStream.CopyTo(packStream);
                }

                // release the reordered created stream
                if (chunkHeader != null)
                {
                    inputStream.Dispose();
                }

                // Add updated object info
                objectInfo.EndOffset = packStream.Position;
                objects.Add(new KeyValuePair<ObjectId, ObjectInfo>(objectIds[i], objectInfo));
            }
        }

        // Rewrite header
        header.Size = packStream.Length;
        packStream.Position = 0;
        binaryWriter.Write(header);

        // Rewrite object locations
        packStream.Position = objectIdPosition;
        binaryWriter.Write(objects);
    }
}