Example #1
0
        /// <summary>
        /// Updates modified data indices and writes enqueued files to archives
        /// <para>Note: IndexFile saving is limited to new entries if the container was opened remotely</para>
        /// </summary>
        /// <param name="directory">Destination directory for the index files and archive blobs</param>
        /// <param name="configContainer">Optional config container forwarded to the index writes</param>
        public void Save(string directory, Configs.ConfigContainer configContainer = null)
        {
            bool isSourceDirectory = directory.EqualsIC(_sourceDirectory);

            // persist altered Data archive indices; skipped entirely for remote containers
            if (!IsRemote)
            {
                foreach (var dataIndex in DataIndices)
                {
                    // group indices are regenerated elsewhere, never written here
                    if (dataIndex.IsGroupIndex)
                        continue;

                    if (dataIndex.RequiresSave)
                    {
                        // modified index: rewrite the index file and its blob
                        string sourceBlob = Helpers.GetCDNPath(dataIndex.Checksum.ToString(), "data", _sourceDirectory);
                        dataIndex.Write(directory, configContainer);
                        dataIndex.WriteBlob(directory, sourceBlob);
                    }
                    else if (!isSourceDirectory)
                    {
                        // untouched index saved to a new location: copy blob and index file across
                        string sourceBlob = Helpers.GetCDNPath(dataIndex.Checksum.ToString(), "data", _sourceDirectory);
                        string targetBlob = Helpers.GetCDNPath(dataIndex.Checksum.ToString(), "data", directory, true);
                        File.Copy(sourceBlob, targetBlob);
                        File.Copy(sourceBlob + ".index", targetBlob + ".index");
                    }
                }
            }

            // drop queued files whose key is already present in an existing data index
            var alreadyIndexed = QueuedEntries.Keys
                                 .Where(key => GetIndexFileAndEntry(IndexType.Data, key, out _) != null)
                                 .ToArray();

            foreach (var key in alreadyIndexed)
                QueuedEntries.Remove(key);

            // batch the remaining queued entries into brand-new archive indices
            var batches = EnumerablePartitioner.ConcreteBatch(QueuedEntries.Values, ArchiveDataSize, e => e.EBlock.CompressedSize);

            foreach (var batch in batches)
            {
                var newIndex = new IndexFile(IndexType.Data);
                newIndex.Add(batch);
                newIndex.Write(directory, configContainer);
                newIndex.WriteBlob(directory);
            }

            // reload all indices from the destination directory
            Open(directory, useParallelism: _useParallelism);
        }
Example #2
0
        /// <summary>
        /// Updates modified data indices and writes enqueued files to archives
        /// </summary>
        /// <param name="directory">Destination directory for the index files and archive blobs</param>
        /// <param name="configContainer">Optional config container forwarded to the index writes</param>
        public void Save(string directory, Configs.ConfigContainer configContainer = null)
        {
            bool isSourceDirectory = directory.Equals(_sourceDirectory, StringComparison.OrdinalIgnoreCase);

            // persist altered Data archive indices
            foreach (var dataIndex in DataIndices)
            {
                // group indices are regenerated elsewhere, never written here
                if (dataIndex.IsGroupIndex)
                    continue;

                if (dataIndex.RequiresSave)
                {
                    // modified index: rewrite the index file and its blob
                    string sourceBlob = Helpers.GetCDNPath(dataIndex.Checksum.ToString(), "data", _sourceDirectory);
                    dataIndex.Write(directory, configContainer);
                    dataIndex.WriteBlob(directory, sourceBlob);
                }
                else if (!isSourceDirectory)
                {
                    // untouched index saved to a new location: copy blob and index file across
                    string sourceBlob = Helpers.GetCDNPath(dataIndex.Checksum.ToString(), "data", _sourceDirectory);
                    string targetBlob = Helpers.GetCDNPath(dataIndex.Checksum.ToString(), "data", directory, true);
                    File.Copy(sourceBlob, targetBlob);
                    File.Copy(sourceBlob + ".index", targetBlob + ".index");
                }
            }

            // batch the queued entries into brand-new archive indices
            var batches = EnumerablePartitioner.ConcreteBatch(_fileQueue.Values, ArchiveDataSize, e => e.EBlock.CompressedSize);

            foreach (var batch in batches)
            {
                var newIndex = new IndexFile(IndexType.Data);
                newIndex.Add(batch);
                newIndex.Write(directory, configContainer);
                newIndex.WriteBlob(directory);
            }

            // TODO 1. verify if this is required 2. fix
            // compute the Data Index Group hash
            //GenerateIndexGroup(directory, configContainer);

            // discard cached indices and reload from the destination directory
            _indices.Clear();
            Open(directory, _useParallelism);
        }
Example #3
0
        /// <summary>
        /// Writes the page data and calculates the page lookups
        /// <para>Entries are batched into fixed-size pages; each page is written, zero-padded
        /// to <paramref name="pageSize"/>, then MD5-hashed in place to build the lookup table.</para>
        /// </summary>
        /// <typeparam name="T">Encoding entry type; determines which header page count is set and whether the EOF sentinel applies</typeparam>
        /// <param name="bw">Writer positioned where the first page should begin</param>
        /// <param name="pageSize">Page size in bytes; every page is padded to this size</param>
        /// <param name="container">Entries to paginate, written in the container's value order</param>
        /// <returns>Map of each page's first entry key to the MD5 hash of that page's bytes</returns>
        private PageIndexTable WritePageImpl <T>(BinaryWriter bw, int pageSize, IDictionary <MD5Hash, T> container) where T : EncodingEntryBase
        {
            PageIndexTable pageIndices = new PageIndexTable();
            // EOF sentinel is only emitted for the EKey (encoded-entry) page set
            bool           EOFflag     = typeof(T) == typeof(EncodingEncodedEntry);

            using (var md5 = MD5.Create())
            {
                // split entries into pages of pageSize
                var  pages     = EnumerablePartitioner.ConcreteBatch(container.Values, pageSize, (x) => x.Size);
                // NOTE(review): if ConcreteBatch returns a deferred sequence, Count() here plus the
                // foreach below enumerates it twice — confirm it materializes its result
                uint pageCount = (uint)pages.Count();

                // set Header PageCount
                EncodingHeader.SetPageCount <T>(pageCount);

                // countdown used to detect the final page for the EOF sentinel
                uint index = pageCount;
                foreach (var page in pages)
                {
                    // write page entries and pad to pageSize
                    page.ForEach(x => x.Write(bw, EncodingHeader));

                    // apply EOF flag (EKey page): zeroed key of EKeyHashSize bytes followed by 0xFFFFFFFF,
                    // appended only after the last page's entries
                    if (EOFflag && --index == 0)
                    {
                        bw.Write(new byte[EncodingHeader.EKeyHashSize]);
                        bw.Write(0xFFFFFFFF);
                    }

                    // pad to page size
                    // NOTE(review): when Position is already a multiple of pageSize this writes a FULL
                    // page of zeros rather than none — verify pages can never end exactly on a boundary
                    bw.Write(new byte[pageSize - (bw.BaseStream.Position % pageSize)]);

                    // create page index record: key of the page's first entry -> MD5 of the
                    // just-written pageSize bytes (hashed by seeking back over the stream tail)
                    pageIndices[page[0].Key] = bw.BaseStream.HashSlice(md5, bw.BaseStream.Position - pageSize, pageSize);
                    // release entry references once the page is hashed
                    page.Clear();
                }
            }

            return(pageIndices);
        }