Exemplo n.º 1
0
        /// <summary>
        /// Initializes a new instance of the <see cref="ObjectDatabase" /> class.
        /// </summary>
        /// <param name="vfsMainUrl">The VFS main URL.</param>
        /// <param name="indexName">Name of the index file.</param>
        /// <param name="vfsAdditionalUrl">The VFS additional URL. It will be used only if vfsMainUrl is read-only.</param>
        /// <param name="loadDefaultBundle">If set to <c>true</c>, synchronously loads the "default" bundle; a missing bundle file is silently ignored.</param>
        /// <exception cref="ArgumentNullException"><paramref name="vfsMainUrl"/> is null.</exception>
        public ObjectDatabase(string vfsMainUrl, string indexName, string vfsAdditionalUrl = null, bool loadDefaultBundle = true)
        {
            if (vfsMainUrl == null)
            {
                throw new ArgumentNullException(nameof(vfsMainUrl));
            }

            // Create the merged asset index map
            ContentIndexMap = new ObjectDatabaseContentIndexMap();

            // Try to open file backends
            // The main backend is opened writable only on Windows; every other platform treats it as read-only
            bool isReadOnly = Platform.Type != PlatformType.Windows;
            var  backend    = new FileOdbBackend(vfsMainUrl, indexName, isReadOnly);

            ContentIndexMap.Merge(backend.ContentIndexMap);
            if (backend.IsReadOnly)
            {
                backendRead1 = backend;
                if (vfsAdditionalUrl != null)
                {
                    // Main backend is read-only: writes (and additional reads) go to the additional URL
                    backendWrite = backendRead2 = new FileOdbBackend(vfsAdditionalUrl, indexName, false);
                    ContentIndexMap.Merge(backendWrite.ContentIndexMap);
                }
            }
            else
            {
                // Main backend is writable: use it for both reads and writes
                backendWrite = backendRead1 = backend;
            }

            // NOTE(review): if the main backend is read-only and vfsAdditionalUrl is null,
            // backendWrite is never assigned and the next line throws — confirm callers always
            // supply an additional URL in that configuration.
            ContentIndexMap.WriteableContentIndexMap = backendWrite.ContentIndexMap;

            BundleBackend = new BundleOdbBackend(vfsMainUrl);

            // Try to open "default" pack file synchronously
            if (loadDefaultBundle)
            {
                try
                {
                    // Blocks on the async load (a constructor cannot await)
                    BundleBackend.LoadBundle("default", ContentIndexMap).GetAwaiter().GetResult();
                }
                catch (FileNotFoundException)
                {
                    // Best effort: a missing "default" bundle is not an error
                }
            }
        }
Exemplo n.º 2
0
        /// <summary>
        /// Initializes a new instance of the <see cref="ObjectDatabase" /> class.
        /// </summary>
        /// <param name="vfsMainUrl">The VFS main URL.</param>
        /// <param name="indexName">Name of the index file.</param>
        /// <param name="vfsAdditionalUrl">The VFS additional URL. It will be used only if vfsMainUrl is read-only.</param>
        /// <param name="loadDefaultBundle">If set to <c>true</c>, synchronously loads the "default" bundle; a missing bundle file is ignored.</param>
        /// <exception cref="ArgumentNullException"><paramref name="vfsMainUrl"/> is null.</exception>
        public ObjectDatabase(string vfsMainUrl, string indexName, string vfsAdditionalUrl = null, bool loadDefaultBundle = true)
        {
            if (vfsMainUrl == null)
            {
                throw new ArgumentNullException(nameof(vfsMainUrl));
            }

            // Merged view over the index maps of every backend we open below
            ContentIndexMap = new ObjectDatabaseContentIndexMap();

            // The main backend is writable only on Windows platforms
            bool mainIsReadOnly = Platform.Type != PlatformType.Windows;
            var mainBackend = new FileOdbBackend(vfsMainUrl, indexName, mainIsReadOnly);
            ContentIndexMap.Merge(mainBackend.ContentIndexMap);

            if (!mainBackend.IsReadOnly)
            {
                // Main backend accepts writes: it serves both reads and writes
                backendRead1 = mainBackend;
                backendWrite = mainBackend;
            }
            else
            {
                backendRead1 = mainBackend;
                if (vfsAdditionalUrl != null)
                {
                    // Main backend is read-only: route writes (and extra reads) to the additional URL
                    var additionalBackend = new FileOdbBackend(vfsAdditionalUrl, indexName, false);
                    backendRead2 = additionalBackend;
                    backendWrite = additionalBackend;
                    ContentIndexMap.Merge(additionalBackend.ContentIndexMap);
                }
            }

            ContentIndexMap.WriteableContentIndexMap = backendWrite.ContentIndexMap;

            BundleBackend = new BundleOdbBackend(vfsMainUrl);

            if (loadDefaultBundle)
            {
                // Best-effort synchronous load of the "default" bundle; absence is not an error
                try
                {
                    BundleBackend.LoadBundle("default", ContentIndexMap).GetAwaiter().GetResult();
                }
                catch (FileNotFoundException)
                {
                }
            }
        }
Exemplo n.º 3
0
        /// <summary>
        /// Packs the given objects from <paramref name="backend"/> into a bundle file at <paramref name="bundleUrl"/>.
        /// When <paramref name="useIncrementalBundle"/> is true, objects may be stored in — and reused from —
        /// sibling incremental bundle files whose names carry an extra ".&lt;objectid&gt;" before the bundle extension.
        /// If an up-to-date bundle (same dependencies, assets and objects, with all incremental parts valid)
        /// already exists, the method returns without rewriting anything.
        /// </summary>
        /// <param name="bundleUrl">The VFS URL of the bundle file to write.</param>
        /// <param name="backend">Backend used to open each object's data stream.</param>
        /// <param name="objectIds">Ids of the objects to pack; must not be empty.</param>
        /// <param name="disableCompressionIds">Objects whose data must be stored uncompressed.</param>
        /// <param name="indexMap">Asset name to object id index serialized into the bundle.</param>
        /// <param name="dependencies">Urls of bundles this bundle depends on.</param>
        /// <param name="useIncrementalBundle">True to enable incremental bundle reuse/creation.</param>
        /// <exception cref="InvalidOperationException">Thrown when <paramref name="objectIds"/> is empty.</exception>
        public static void CreateBundle(string bundleUrl, IOdbBackend backend, ObjectId[] objectIds, ISet <ObjectId> disableCompressionIds, Dictionary <string, ObjectId> indexMap, IList <string> dependencies, bool useIncrementalBundle)
        {
            if (objectIds.Length == 0)
            {
                throw new InvalidOperationException("Nothing to pack.");
            }

            // Maps each object id to its slot in "objects" so entries can be patched in place later
            var objectsToIndex = new Dictionary <ObjectId, int>(objectIds.Length);

            var objects = new List <KeyValuePair <ObjectId, ObjectInfo> >();

            for (int i = 0; i < objectIds.Length; ++i)
            {
                objectsToIndex.Add(objectIds[i], objects.Count);
                objects.Add(new KeyValuePair <ObjectId, ObjectInfo>(objectIds[i], new ObjectInfo()));
            }

            var incrementalBundles = new List <ObjectId>();

            // If there is a .bundle, add incremental id before it
            var bundleExtensionLength = (bundleUrl.EndsWith(BundleExtension) ? BundleExtension.Length : 0);

            // Early exit if package didn't change (header-check only)
            if (VirtualFileSystem.FileExists(bundleUrl))
            {
                try
                {
                    using (var packStream = VirtualFileSystem.OpenStream(bundleUrl, VirtualFileMode.Open, VirtualFileAccess.Read))
                    {
                        var bundle = ReadBundleDescription(packStream);

                        // If package didn't change since last time, early exit!
                        if (ArrayExtensions.ArraysEqual(bundle.Dependencies, dependencies) &&
                            ArrayExtensions.ArraysEqual(bundle.Assets.OrderBy(x => x.Key).ToList(), indexMap.OrderBy(x => x.Key).ToList()) &&
                            ArrayExtensions.ArraysEqual(bundle.Objects.Select(x => x.Key).OrderBy(x => x).ToList(), objectIds.OrderBy(x => x).ToList()))
                        {
                            // Make sure all incremental bundles exist
                            // Also, if we don't want incremental bundles but we have some (or vice-versa), let's force a regeneration
                            if ((useIncrementalBundle == (bundle.IncrementalBundles.Count > 0)) &&
                                bundle.IncrementalBundles.Select(x => bundleUrl.Insert(bundleUrl.Length - bundleExtensionLength, "." + x)).All(x =>
                            {
                                if (!VirtualFileSystem.FileExists(x))
                                {
                                    return(false);
                                }
                                using (var incrementalStream = VirtualFileSystem.OpenStream(x, VirtualFileMode.Open, VirtualFileAccess.Read))
                                    return(ValidateHeader(incrementalStream));
                            }))
                            {
                                return;
                            }
                        }
                    }

                    // Process existing incremental bundles one by one
                    // Try to find if there is enough to reuse in each of them
                    var filename  = VirtualFileSystem.GetFileName(bundleUrl);
                    var directory = VirtualFileSystem.GetParentFolder(bundleUrl);

                    // NOTE(review): blocks on the async listing via .Result — confirm this is acceptable here
                    foreach (var incrementalBundleUrl in VirtualFileSystem.ListFiles(directory, filename.Insert(filename.Length - bundleExtensionLength, ".*"), VirtualSearchOption.TopDirectoryOnly).Result)
                    {
                        // Extract the hash string sitting between the trailing "." and the bundle extension
                        var      incrementalIdString = incrementalBundleUrl.Substring(incrementalBundleUrl.Length - bundleExtensionLength - ObjectId.HashStringLength, ObjectId.HashStringLength);
                        ObjectId incrementalId;
                        if (!ObjectId.TryParse(incrementalIdString, out incrementalId))
                        {
                            continue;
                        }

                        // If we don't want incremental bundles, delete old ones from previous build
                        if (!useIncrementalBundle)
                        {
                            VirtualFileSystem.FileDelete(incrementalBundleUrl);
                            continue;
                        }

                        long sizeNeededItems = 0;
                        long sizeTotal       = 0;

                        BundleDescription incrementalBundle;
                        try
                        {
                            using (var packStream = VirtualFileSystem.OpenStream(incrementalBundleUrl, VirtualFileMode.Open, VirtualFileAccess.Read))
                            {
                                incrementalBundle = ReadBundleDescription(packStream);
                            }

                            // Compute size of objects (needed ones and everything)
                            foreach (var @object in incrementalBundle.Objects)
                            {
                                var objectCompressedSize = @object.Value.EndOffset - @object.Value.StartOffset;

                                // TODO: Detect object that are stored without ObjectId being content hash: we need to check actual content hash is same in this case
                                if (objectsToIndex.ContainsKey(@object.Key))
                                {
                                    sizeNeededItems += objectCompressedSize;
                                }
                                sizeTotal += objectCompressedSize;
                            }

                            // Check if we would reuse at least 50% of the incremental bundle, otherwise let's just get rid of it
                            var reuseRatio = (float)((double)sizeNeededItems / (double)sizeTotal);
                            if (reuseRatio < 0.5f)
                            {
                                VirtualFileSystem.FileDelete(incrementalBundleUrl);
                            }
                            else
                            {
                                // We will reuse this incremental bundle
                                // Let's add ObjectId entries
                                foreach (var @object in incrementalBundle.Objects)
                                {
                                    int objectIndex;
                                    if (objectsToIndex.TryGetValue(@object.Key, out objectIndex))
                                    {
                                        var objectInfo = @object.Value;
                                        // Index is 1-based: 0 means "stored in the main bundle"
                                        objectInfo.IncrementalBundleIndex = incrementalBundles.Count + 1;
                                        objects[objectIndex] = new KeyValuePair <ObjectId, ObjectInfo>(@object.Key, objectInfo);
                                    }
                                }

                                // Add this incremental bundle in the list
                                incrementalBundles.Add(incrementalId);
                            }
                        }
                        catch (Exception)
                        {
                            // Could not read incremental bundle (format changed?)
                            // Let's delete it
                            VirtualFileSystem.FileDelete(incrementalBundleUrl);
                        }
                    }
                }
                catch (Exception)
                {
                    // Could not read previous bundle (format changed?)
                    // Let's just mute this error as new bundle will overwrite it anyway
                }
            }

            // Count objects which needs to be saved
            var incrementalObjects = new List <KeyValuePair <ObjectId, ObjectInfo> >();

            if (useIncrementalBundle)
            {
                for (int i = 0; i < objectIds.Length; ++i)
                {
                    // Skip if already part of an existing incremental package
                    if (objects[i].Value.IncrementalBundleIndex > 0)
                    {
                        continue;
                    }

                    incrementalObjects.Add(new KeyValuePair <ObjectId, ObjectInfo>(objects[i].Key, new ObjectInfo()));
                }
            }

            // Create an incremental package
            var newIncrementalId       = ObjectId.New();
            var incrementalBundleIndex = incrementalBundles.Count;

            if (useIncrementalBundle && incrementalObjects.Count > 0)
            {
                incrementalBundles.Add(newIncrementalId);
            }

            using (var packStream = VirtualFileSystem.OpenStream(bundleUrl, VirtualFileMode.Create, VirtualFileAccess.Write))
            {
                var header = new Header();
                header.MagicHeader = Header.MagicHeaderValid;

                var packBinaryWriter = new BinarySerializationWriter(packStream);
                packBinaryWriter.Write(header);
                // Write dependencies
                packBinaryWriter.Write(dependencies.ToList());
                // Write incremental bundles
                packBinaryWriter.Write(incrementalBundles.ToList());

                // Save location of object ids
                var packObjectIdPosition = packStream.Position;

                // Write empty object ids (reserve space, will be rewritten later)
                packBinaryWriter.Write(objects);

                // Write index
                packBinaryWriter.Write(indexMap.ToList());

                // New object payloads go into a fresh incremental file when one is needed, otherwise into the main pack
                using (var incrementalStream = incrementalObjects.Count > 0 ? VirtualFileSystem.OpenStream(bundleUrl.Insert(bundleUrl.Length - bundleExtensionLength, "." + newIncrementalId), VirtualFileMode.Create, VirtualFileAccess.Write) : null)
                {
                    var  incrementalBinaryWriter     = incrementalStream != null ? new BinarySerializationWriter(incrementalStream) : null;
                    long incrementalObjectIdPosition = 0;
                    if (incrementalStream != null)
                    {
                        incrementalBinaryWriter.Write(header);
                        // Write dependencies
                        incrementalBinaryWriter.Write(new List <string>());
                        // Write incremental bundles
                        incrementalBinaryWriter.Write(new List <ObjectId>());

                        // Save location of object ids
                        incrementalObjectIdPosition = incrementalStream.Position;

                        // Write empty object ids (reserve space, will be rewritten later)
                        incrementalBinaryWriter.Write(incrementalObjects);

                        // Write index
                        incrementalBinaryWriter.Write(new List <KeyValuePair <string, ObjectId> >());
                    }

                    var objectOutputStream     = incrementalStream ?? packStream;
                    int incrementalObjectIndex = 0;
                    for (int i = 0; i < objectIds.Length; ++i)
                    {
                        // Skip if already part of an existing incremental package
                        if (objects[i].Value.IncrementalBundleIndex > 0)
                        {
                            continue;
                        }

                        using (var objectStream = backend.OpenStream(objectIds[i]))
                        {
                            // Prepare object info
                            var objectInfo = new ObjectInfo {
                                StartOffset = objectOutputStream.Position, SizeNotCompressed = objectStream.Length
                            };

                            // re-order the file content so that it is not necessary to seek while reading the input stream (header/object/refs -> header/refs/object)
                            var inputStream          = objectStream;
                            var originalStreamLength = objectStream.Length;
                            var streamReader         = new BinarySerializationReader(inputStream);
                            var chunkHeader          = ChunkHeader.Read(streamReader);
                            if (chunkHeader != null)
                            {
                                // create the reordered stream
                                var reorderedStream = new MemoryStream((int)originalStreamLength);

                                // copy the header
                                var streamWriter = new BinarySerializationWriter(reorderedStream);
                                chunkHeader.Write(streamWriter);

                                // copy the references
                                var newOffsetReferences = reorderedStream.Position;
                                inputStream.Position = chunkHeader.OffsetToReferences;
                                inputStream.CopyTo(reorderedStream);

                                // copy the object
                                var newOffsetObject = reorderedStream.Position;
                                inputStream.Position = chunkHeader.OffsetToObject;
                                inputStream.CopyTo(reorderedStream, chunkHeader.OffsetToReferences - chunkHeader.OffsetToObject);

                                // rewrite the chunk header with correct offsets
                                chunkHeader.OffsetToObject     = (int)newOffsetObject;
                                chunkHeader.OffsetToReferences = (int)newOffsetReferences;
                                reorderedStream.Position       = 0;
                                chunkHeader.Write(streamWriter);

                                // change the input stream to use reordered stream
                                inputStream          = reorderedStream;
                                inputStream.Position = 0;
                            }

                            // compress the stream
                            if (!disableCompressionIds.Contains(objectIds[i]))
                            {
                                objectInfo.IsCompressed = true;

                                // Flushed but not disposed: objectOutputStream must stay open for the next object
                                var lz4OutputStream = new LZ4Stream(objectOutputStream, CompressionMode.Compress);
                                inputStream.CopyTo(lz4OutputStream);
                                lz4OutputStream.Flush();
                            }
                            // copy the stream "as is"
                            else
                            {
                                // Write stream
                                inputStream.CopyTo(objectOutputStream);
                            }

                            // release the reordered created stream
                            if (chunkHeader != null)
                            {
                                inputStream.Dispose();
                            }

                            // Add updated object info
                            objectInfo.EndOffset = objectOutputStream.Position;
                            // Note: we add 1 because 0 is reserved for self; first incremental bundle starts at 1
                            objectInfo.IncrementalBundleIndex = objectOutputStream == incrementalStream ? incrementalBundleIndex + 1 : 0;
                            objects[i] = new KeyValuePair <ObjectId, ObjectInfo>(objectIds[i], objectInfo);

                            if (useIncrementalBundle)
                            {
                                // Also update incremental bundle object info
                                objectInfo.IncrementalBundleIndex            = 0; // stored in same bundle
                                incrementalObjects[incrementalObjectIndex++] = new KeyValuePair <ObjectId, ObjectInfo>(objectIds[i], objectInfo);
                            }
                        }
                    }

                    // First finish to write incremental package so that main one can't be valid on the HDD without the incremental one being too
                    if (incrementalStream != null)
                    {
                        // Rewrite headers
                        header.Size = incrementalStream.Length;
                        incrementalStream.Position = 0;
                        incrementalBinaryWriter.Write(header);

                        // Rewrite object with updated offsets/size
                        incrementalStream.Position = incrementalObjectIdPosition;
                        incrementalBinaryWriter.Write(incrementalObjects);
                    }
                }

                // Rewrite headers
                header.Size         = packStream.Length;
                packStream.Position = 0;
                packBinaryWriter.Write(header);

                // Rewrite object with updated offsets/size
                packStream.Position = packObjectIdPosition;
                packBinaryWriter.Write(objects);
            }
        }
Exemplo n.º 4
0
        /// <summary>
        /// Packs the given objects from <paramref name="backend"/> into a single bundle file at
        /// <paramref name="vfsUrl"/>. If a readable bundle with identical dependencies, assets and
        /// objects already exists, the file is left untouched.
        /// </summary>
        /// <param name="vfsUrl">The VFS URL of the bundle file to write.</param>
        /// <param name="backend">Backend used to open each object's data stream.</param>
        /// <param name="objectIds">Ids of the objects to pack; must not be empty.</param>
        /// <param name="disableCompressionIds">Objects whose data must be stored uncompressed.</param>
        /// <param name="indexMap">Asset name to object id index serialized into the bundle.</param>
        /// <param name="dependencies">Urls of bundles this bundle depends on.</param>
        /// <exception cref="InvalidOperationException">Thrown when <paramref name="objectIds"/> is empty.</exception>
        public static void CreateBundle(string vfsUrl, IOdbBackend backend, ObjectId[] objectIds, ISet <ObjectId> disableCompressionIds, Dictionary <string, ObjectId> indexMap, IList <string> dependencies)
        {
            if (objectIds.Length == 0)
            {
                throw new InvalidOperationException("Nothing to pack.");
            }

            // Early exit if package didn't change (header-check only)
            if (VirtualFileSystem.FileExists(vfsUrl))
            {
                try
                {
                    using (var packStream = VirtualFileSystem.OpenStream(vfsUrl, VirtualFileMode.Open, VirtualFileAccess.Read))
                    {
                        var bundle = ReadBundleDescription(packStream);

                        // If package didn't change since last time, early exit!
                        if (ArrayExtensions.ArraysEqual(bundle.Dependencies, dependencies) &&
                            ArrayExtensions.ArraysEqual(bundle.Assets.OrderBy(x => x.Key).ToList(), indexMap.OrderBy(x => x.Key).ToList()) &&
                            ArrayExtensions.ArraysEqual(bundle.Objects.Select(x => x.Key).OrderBy(x => x).ToList(), objectIds.OrderBy(x => x).ToList()))
                        {
                            return;
                        }
                    }
                }
                catch (Exception)
                {
                    // Could not read previous bundle (format changed?)
                    // Let's just mute this error as new bundle will overwrite it anyway
                }
            }

            using (var packStream = VirtualFileSystem.OpenStream(vfsUrl, VirtualFileMode.Create, VirtualFileAccess.Write))
            {
                var header = new Header();
                header.MagicHeader = Header.MagicHeaderValid;

                var binaryWriter = new BinarySerializationWriter(packStream);
                binaryWriter.Write(header);

                // Write dependencies
                binaryWriter.Write(dependencies.ToList());

                // Save location of object ids
                var objectIdPosition = packStream.Position;

                // Write empty object ids (reserve space, will be rewritten later)
                var objects = new List <KeyValuePair <ObjectId, ObjectInfo> >();
                for (int i = 0; i < objectIds.Length; ++i)
                {
                    objects.Add(new KeyValuePair <ObjectId, ObjectInfo>(objectIds[i], new ObjectInfo()));
                }

                binaryWriter.Write(objects);
                // Reuse the same list to collect the real ObjectInfo entries written below
                objects.Clear();

                // Write index
                binaryWriter.Write(indexMap.ToList());

                for (int i = 0; i < objectIds.Length; ++i)
                {
                    using (var objectStream = backend.OpenStream(objectIds[i]))
                    {
                        // Prepare object info
                        var objectInfo = new ObjectInfo {
                            StartOffset = packStream.Position, SizeNotCompressed = objectStream.Length
                        };

                        // re-order the file content so that it is not necessary to seek while reading the input stream (header/object/refs -> header/refs/object)
                        var inputStream          = objectStream;
                        var originalStreamLength = objectStream.Length;
                        var streamReader         = new BinarySerializationReader(inputStream);
                        var chunkHeader          = ChunkHeader.Read(streamReader);
                        if (chunkHeader != null)
                        {
                            // create the reordered stream
                            var reorderedStream = new MemoryStream((int)originalStreamLength);

                            // copy the header
                            var streamWriter = new BinarySerializationWriter(reorderedStream);
                            chunkHeader.Write(streamWriter);

                            // copy the references
                            var newOffsetReferences = reorderedStream.Position;
                            inputStream.Position = chunkHeader.OffsetToReferences;
                            inputStream.CopyTo(reorderedStream);

                            // copy the object
                            var newOffsetObject = reorderedStream.Position;
                            inputStream.Position = chunkHeader.OffsetToObject;
                            inputStream.CopyTo(reorderedStream, chunkHeader.OffsetToReferences - chunkHeader.OffsetToObject);

                            // rewrite the chunk header with correct offsets
                            chunkHeader.OffsetToObject     = (int)newOffsetObject;
                            chunkHeader.OffsetToReferences = (int)newOffsetReferences;
                            reorderedStream.Position       = 0;
                            chunkHeader.Write(streamWriter);

                            // change the input stream to use reordered stream
                            inputStream          = reorderedStream;
                            inputStream.Position = 0;
                        }

                        // compress the stream
                        if (!disableCompressionIds.Contains(objectIds[i]))
                        {
                            objectInfo.IsCompressed = true;

                            // Flushed but not disposed: packStream must stay open for the next object
                            var lz4OutputStream = new LZ4Stream(packStream, CompressionMode.Compress);
                            inputStream.CopyTo(lz4OutputStream);
                            lz4OutputStream.Flush();
                        }
                        else // copy the stream "as is"
                        {
                            // Write stream
                            inputStream.CopyTo(packStream);
                        }

                        // release the reordered created stream
                        if (chunkHeader != null)
                        {
                            inputStream.Dispose();
                        }

                        // Add updated object info
                        objectInfo.EndOffset = packStream.Position;
                        objects.Add(new KeyValuePair <ObjectId, ObjectInfo>(objectIds[i], objectInfo));
                    }
                }

                // Rewrite header
                header.Size         = packStream.Length;
                packStream.Position = 0;
                binaryWriter.Write(header);

                // Rewrite object locations
                packStream.Position = objectIdPosition;
                binaryWriter.Write(objects);
            }
        }