Exemple #1
0
        /// <summary>
        /// Do a full backup of a set of environments. Note that the order of the environments matter!
        /// </summary>
        /// <param name="envs">Environments to back up, processed in the given order.</param>
        /// <param name="backupPath">Path of the zip file to create (overwritten if present).</param>
        /// <param name="compression">Compression level applied to every zip entry.</param>
        /// <param name="infoNotify">Optional progress callback; replaced by a no-op when null.</param>
        /// <param name="backupStarted">Optional callback forwarded to Backup for each environment.</param>
        public void ToFile(IEnumerable <StorageEnvironmentInformation> envs, string backupPath, CompressionLevel compression = CompressionLevel.Optimal,
                           Action <string> infoNotify = null,
                           Action backupStarted       = null)
        {
            infoNotify = infoNotify ?? (s => { });

            infoNotify("Voron backup db started");

            using (var file = new FileStream(backupPath, FileMode.Create))
            {
                using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
                {
                    foreach (var e in envs)
                    {
                        // Fixed: the message previously read "...<name>started" (missing space).
                        infoNotify("Voron backup " + e.Name + " started");
                        var basePath  = Path.Combine(e.Folder, e.Name);
                        var env       = e.Env;
                        var dataPager = env.Options.DataPager;
                        var copier    = new DataCopier(env.Options.PageSize * 16);
                        Backup(env, compression, infoNotify, backupStarted, dataPager, package, basePath,
                               copier);
                    }
                }

                // Flush AFTER the ZipArchive is disposed: the archive writes its central
                // directory on dispose, and flushing earlier would not persist it to disk.
                file.Flush(true); // make sure that we fully flushed to disk
            }

            infoNotify("Voron backup db finished");
        }
Exemple #2
0
        /// <summary>
        /// Do a full backup of a single storage environment into a zip file.
        /// </summary>
        /// <param name="env">The environment to back up.</param>
        /// <param name="backupPath">Path of the zip file to create (overwritten if present).</param>
        /// <param name="compression">Compression level applied to every zip entry.</param>
        /// <param name="infoNotify">Optional progress callback; replaced by a no-op when null.</param>
        /// <param name="backupStarted">Optional callback forwarded to Backup.</param>
        public void ToFile(StorageEnvironment env, string backupPath,
                           CompressionLevel compression = CompressionLevel.Optimal,
                           Action <string> infoNotify   = null,
                           Action backupStarted         = null)
        {
            infoNotify = infoNotify ?? (s => { });

            infoNotify("Voron backup db started");

            using (var file = new FileStream(backupPath, FileMode.Create))
            {
                using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
                {
                    infoNotify("Voron backup started");
                    var dataPager = env.Options.DataPager;
                    var copier    = new DataCopier(env.Options.PageSize * 16);
                    Backup(env, compression, infoNotify, backupStarted, dataPager, package, string.Empty,
                           copier);
                }

                // Flush AFTER the ZipArchive is disposed: the archive writes its central
                // directory on dispose, and flushing earlier would not persist it to disk.
                file.Flush(true); // make sure that we fully flushed to disk
            }

            infoNotify("Voron backup db finished");
        }
Exemple #3
0
    /// <summary>
    /// Attempts to purchase <paramref name="quantity"/> units of <paramref name="item"/>:
    /// validates cash and inventory space, charges the player, and moves the goods
    /// (whole stack or a split-off clone) into the player's inventory.
    /// </summary>
    /// <param name="item">The shop item being bought.</param>
    /// <param name="quantity">How many units to buy.</param>
    public void buyItem(Item item, int quantity)
    {
        var totalCost = item.cost * quantity;
        if (Vars.cash < totalCost)
        {
            Messenger.notEnoughtCash(item.itemName, quantity);
            return;
        }

        // Items with negligible volume (<= .001f) always fit, so skip the space check.
        if (item.volume > .001f && (playerInventory.getFreeVolume() - (item.volume * quantity)) < 0)
        {
            Messenger.showMessage("Недостаточно места в инвентаре.");
            return;
        }

        Vars.cash -= totalCost;
        item.cell.inventory.containerScreen.updateCashTxt();

        if (item.quantity == quantity)
        {
            // Whole stack bought: hand over the existing item object itself.
            playerInventory.addItemToCell(item.cell.takeItem(), null);
            return;
        }

        // Partial purchase: clone the item data into a fresh instance and split the stack.
        Item purchased = Instantiate <Transform>(ItemFactory.itemPrefab).GetComponent <Item>();
        purchased.init(DataCopier.copy(item.itemData));
        purchased.quantity = quantity;
        playerInventory.addItemToCell(purchased, null);
        item.quantity -= quantity;
    }
Exemple #4
0
        /// <summary>
        /// Creates a copy of this instance via DataCopier.CloneJson and attaches
        /// the copy to the supplied parent.
        /// </summary>
        /// <param name="parent">The object the clone will be parented to.</param>
        /// <returns>The cloned <see cref="DataType"/> with its Parent set.</returns>
        public virtual DataType Clone(TaxObject parent)
        {
            var clone = DataCopier.CloneJson <DataType>(this);
            clone.Parent = parent;
            return clone;
        }
Exemple #5
0
        /// <summary>
        /// Do a incremental backup of a set of environments. Note that the order of the environments matter!
        /// </summary>
        /// <param name="envs">Environments to back up incrementally, in order; each must have incremental backup enabled.</param>
        /// <param name="backupPath">Path of the zip file to create (overwritten if present).</param>
        /// <param name="compression">Compression level applied to every zip entry.</param>
        /// <param name="infoNotify">Optional progress callback; replaced by a no-op when null.</param>
        /// <param name="backupStarted">Optional callback forwarded to Incremental_Backup for each environment.</param>
        /// <returns>The total number of 4KB journal pages backed up across all environments.</returns>
        /// <exception cref="InvalidOperationException">Thrown when any environment has incremental backup disabled.</exception>
        public long ToFile(IEnumerable <FullBackup.StorageEnvironmentInformation> envs, string backupPath, CompressionLevel compression = CompressionLevel.Optimal,
                           Action <string> infoNotify = null,
                           Action backupStarted       = null)
        {
            infoNotify = infoNotify ?? (s => { });

            long totalNumberOfBackedUpPages = 0;

            using (var file = SafeFileStream.Create(backupPath, FileMode.Create))
            {
                using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
                {
                    foreach (var e in envs)
                    {
                        if (e.Env.Options.IncrementalBackupEnabled == false)
                        {
                            throw new InvalidOperationException("Incremental backup is disabled for this storage");
                        }
                        // Fixed: the message previously read "...<name>started" (missing space).
                        infoNotify("Voron backup " + e.Name + " started");
                        var basePath = Path.Combine(e.Folder, e.Name);
                        var env      = e.Env;
                        var copier   = new DataCopier(Constants.Storage.PageSize * 16);
                        var numberOfBackedUpPages = Incremental_Backup(env, compression, infoNotify,
                                                                       backupStarted, package, basePath, copier);
                        totalNumberOfBackedUpPages += numberOfBackedUpPages;
                    }
                }
                // Flushed after the ZipArchive is disposed, so the zip central directory
                // (written on dispose) is included in the flush-to-disk.
                file.Flush(true); // make sure that this is actually persisted fully to disk

                return(totalNumberOfBackedUpPages);
            }
        }
Exemple #6
0
        /// <summary>
        /// Performs an incremental backup of a single storage environment into a zip file.
        /// </summary>
        /// <param name="env">The environment to back up; must have incremental backup enabled.</param>
        /// <param name="backupPath">Path of the zip file to create (overwritten if present).</param>
        /// <param name="compression">Compression level applied to every zip entry.</param>
        /// <param name="infoNotify">Optional progress callback; replaced by a no-op when null.</param>
        /// <param name="backupStarted">Optional callback forwarded to Incremental_Backup.</param>
        /// <returns>The number of 4KB journal pages backed up.</returns>
        /// <exception cref="InvalidOperationException">Thrown when incremental backup is disabled.</exception>
        public long ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal,
                           Action <string> infoNotify = null,
                           Action backupStarted       = null)
        {
            if (infoNotify == null)
            {
                infoNotify = s => { };
            }

            if (env.Options.IncrementalBackupEnabled == false)
            {
                throw new InvalidOperationException("Incremental backup is disabled for this storage");
            }

            using (var file = SafeFileStream.Create(backupPath, FileMode.Create))
            {
                var copier = new DataCopier(Constants.Storage.PageSize * 16);
                long backedUpPages;
                using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
                {
                    backedUpPages = Incremental_Backup(env, compression, infoNotify,
                                                       backupStarted, package, string.Empty, copier);
                }
                // Flush after the archive is disposed so the zip directory reaches the disk too.
                file.Flush(true);
                return backedUpPages;
            }
        }
Exemple #7
0
        /// <summary>
        /// Verifies that DataCopier.Convert reports the expected row count (3) when
        /// copying the sample xlsx file into the SQL Server test table.
        /// </summary>
        public void ConvertReturnsCorrectRowCount()
        {
            // Arrange: only the output table needs to be configured for this scenario.
            var opts = new DataCopier.Options {
                outputTable = tests.SqlServerWriterTests.TABLENAME
            };

            // Act
            var count = DataCopier.Convert(
                opts,
                tests.XlsxTests.FILENAME,
                $"sql://{tests.SqlServerWriterTests.CONNECTIONSTRINGWITHDATABASE}"
                );

            // Assert
            Assert.Equal(3, count);
        }
        /// <summary>
        /// Copies all configured tables in parallel (one DataCopier per table) while a
        /// progress bar renders, then cancels the progress bar, restores the console
        /// background color, and prints the elapsed time.
        /// </summary>
        public void CopyData()
        {
            var sw = new Stopwatch();

            sw.Start();
            // Fixed: CancellationTokenSource implements IDisposable and was never
            // disposed; the using block guarantees cleanup even if a copy throws.
            using (var token = new CancellationTokenSource())
            {
                RenderProgressBar(token.Token, "Copying the data", 2);

                //copy data
                var empty = Console.BackgroundColor;

                Parallel.ForEach(_tables, table =>
                {
                    var copier = new DataCopier(table, _chunkSize, _msConStr, _pgConStr);
                    copier.CopyTable(ref _progress);
                });
                token.Cancel();
                sw.Stop();
                Console.BackgroundColor = empty;
                Console.Write($"({sw.Elapsed:g})\n");
            }
        }
Exemple #9
0
        /// <summary>
        /// Do a full backup of a set of environments. Note that the order of the environments matter!
        /// </summary>
        /// <param name="envs">Environments to back up, processed in the given order.</param>
        /// <param name="package">An already-open zip archive that receives the backup entries; the caller owns its lifetime.</param>
        /// <param name="compression">Compression level applied to every zip entry.</param>
        /// <param name="infoNotify">Optional progress callback; replaced by a no-op when null.</param>
        /// <param name="backupStarted">Optional callback forwarded to Backup for each environment.</param>
        public void ToFile(IEnumerable <StorageEnvironmentInformation> envs,
                           ZipArchive package,
                           CompressionLevel compression = CompressionLevel.Optimal,
                           Action <string> infoNotify   = null,
                           Action backupStarted         = null)
        {
            infoNotify = infoNotify ?? (s => { });

            infoNotify("Voron backup db started");

            foreach (var e in envs)
            {
                // Fixed: the message previously read "...<name>started" (missing space).
                infoNotify("Voron backup " + e.Name + " started");
                var basePath  = Path.Combine(e.Folder, e.Name);
                var env       = e.Env;
                var dataPager = env.Options.DataPager;
                var copier    = new DataCopier(Constants.Storage.PageSize * 16);
                Backup(env, compression, infoNotify, backupStarted, dataPager, package, basePath,
                       copier);
            }

            infoNotify("Voron backup db finished");
        }
Exemple #10
0
 /// <summary>
 /// Command-line entry point: parses CLI options, runs DataCopier.Convert with the
 /// trimmed reader/writer URLs, and reports line count and throughput.
 /// </summary>
 static void Main(string[] args)
 {
     CommandLine.Parser.Default.ParseArguments <cliOptions>(args)
     .WithParsed <cliOptions>(cli =>
     {
         // Blank/whitespace table names are normalized to null; others are trimmed.
         var opts = new DataCopier.Options {
             bufferRows      = cli.bufferRows,
             truncate        = cli.truncate,
             inputTable      = String.IsNullOrWhiteSpace(cli.inputTable) ? null : cli.inputTable.Trim(),
             outputTable     = String.IsNullOrWhiteSpace(cli.outputTable) ? null : cli.outputTable.Trim(),
             skipRows        = cli.skipRows,
             leaveRows       = cli.leaveRows,
             skipColumns     = cli.skipColumns,
             maxColumns      = cli.maxColumns,
             worksheetNumber = cli.worksheetNumber
         };

         var startTime = DateTime.Now;
         var lines     = 0;
         try {
             lines = DataCopier.Convert(opts, cli.readerUrl.Trim(), cli.writerUrl.Trim());
         }
         catch (ArgumentException e) {
             // Bad URLs/options: report and fall through, printing "0 lines copied".
             Console.Error.WriteLine(e.Message);
         }
         var elapsedSeconds = (DateTime.Now - startTime).TotalMilliseconds / 1000F;
         Console.WriteLine($"{lines} lines copied in {elapsedSeconds} seconds ({lines/elapsedSeconds} lines per second).");
     })
     .WithNotParsed <cliOptions>(errs =>
     {
         // Nothing extra to do on parse failure (the default parser prints usage).
     });
 }
Exemple #11
0
        /// <summary>
        /// Copies the journal pages written since the last incremental backup of
        /// <paramref name="env"/> into <paramref name="package"/>, one zip entry per
        /// journal file, then records the new backup position in the storage header.
        /// </summary>
        /// <param name="env">The storage environment being backed up.</param>
        /// <param name="compression">Compression level for the created zip entries.</param>
        /// <param name="infoNotify">Progress callback (assumed non-null by this point).</param>
        /// <param name="backupStarted">Optional callback invoked once the read snapshot is taken.</param>
        /// <param name="package">Destination zip archive.</param>
        /// <param name="basePath">Path prefix for entry names inside the archive.</param>
        /// <param name="copier">Buffered copier used to stream journal pages.</param>
        /// <returns>The number of 4KB blocks copied into the archive.</returns>
        private static long Incremental_Backup(StorageEnvironment env, CompressionLevel compression, Action <string> infoNotify,
                                               Action backupStarted, ZipArchive package, string basePath, DataCopier copier)
        {
            long numberOfBackedUpPages = 0;
            long lastWrittenLogFile    = -1;
            long lastWrittenLog4kb     = -1;
            bool backupSuccess         = true;
            IncrementalBackupInfo backupInfo;
            JournalInfo           journalInfo;

            var transactionPersistentContext = new TransactionPersistentContext(true);

            // A write transaction is taken (and deliberately NOT committed) only to get
            // a stable snapshot of the header and of the current journal write position.
            using (var txw = env.NewLowLevelTransaction(transactionPersistentContext, TransactionFlags.ReadWrite))
            {
                backupInfo  = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);
                journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);

                if (env.Journal.CurrentFile != null)
                {
                    lastWrittenLogFile = env.Journal.CurrentFile.Number;
                    lastWrittenLog4kb  = env.Journal.CurrentFile.WritePosIn4KbPosition;
                }

                // txw.Commit(); intentionally not committing
            }


            using (env.NewLowLevelTransaction(transactionPersistentContext, TransactionFlags.Read))
            {
                backupStarted?.Invoke(); // we let call know that we have started the backup

                var usedJournals = new List <JournalFile>();

                try
                {
                    long lastBackedUpPage = -1;
                    long lastBackedUpFile = -1;

                    var firstJournalToBackup = backupInfo.LastBackedUpJournal;

                    if (firstJournalToBackup == -1)
                    {
                        firstJournalToBackup = 0; // first time that we do incremental backup
                    }
                    // Walk every journal from the last backed-up one through the last
                    // created one, copying only the not-yet-backed-up 4KB blocks.
                    for (var journalNum = firstJournalToBackup;
                         journalNum <= backupInfo.LastCreatedJournal;
                         journalNum++)
                    {
                        var num = journalNum;

                        var journalFile = GetJournalFile(env, journalNum, backupInfo, journalInfo);

                        // Keep the journal alive (and track it) until the finally block releases it.
                        journalFile.AddRef();

                        usedJournals.Add(journalFile);

                        var  startBackupAt      = 0L;
                        long numberOf4KbsToCopy = journalFile.JournalWriter.NumberOfAllocated4Kb;
                        // For the journal we stopped at last time, resume right after the
                        // last backed-up position instead of copying from the start.
                        if (journalFile.Number == backupInfo.LastBackedUpJournal)
                        {
                            startBackupAt       = backupInfo.LastBackedUpJournalPage + 1;
                            numberOf4KbsToCopy -= startBackupAt;
                        }

                        if (startBackupAt >= journalFile.JournalWriter.NumberOfAllocated4Kb) // nothing to do here
                        {
                            continue;
                        }

                        var part =
                            package.CreateEntry(
                                Path.Combine(basePath, StorageEnvironmentOptions.JournalName(journalNum))
                                , compression);
                        Debug.Assert(part != null);

                        // Don't copy past the snapshot of the current journal's write
                        // position taken above — later writes belong to the next backup.
                        if (journalFile.Number == lastWrittenLogFile)
                        {
                            numberOf4KbsToCopy -= (journalFile.JournalWriter.NumberOfAllocated4Kb - lastWrittenLog4kb);
                        }

                        using (var stream = part.Open())
                        {
                            copier.ToStream(env, journalFile, startBackupAt, numberOf4KbsToCopy, stream);
                            infoNotify(string.Format("Voron Incr copy journal number {0}", num));
                        }

                        lastBackedUpFile = journalFile.Number;
                        if (journalFile.Number == backupInfo.LastCreatedJournal)
                        {
                            lastBackedUpPage = startBackupAt + numberOf4KbsToCopy - 1;
                            // we used all of this file, so the next backup should start in the next file
                            if (lastBackedUpPage == (journalFile.JournalWriter.NumberOfAllocated4Kb - 1))
                            {
                                lastBackedUpPage = -1;
                                lastBackedUpFile++;
                            }
                        }

                        numberOfBackedUpPages += numberOf4KbsToCopy;
                    }

                    // Persist the new backup position so the next incremental run resumes here.
                    env.HeaderAccessor.Modify(header =>
                    {
                        header->IncrementalBackup.LastBackedUpJournal     = lastBackedUpFile;
                        header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage;
                    });
                }
                catch (Exception)
                {
                    backupSuccess = false;
                    throw;
                }
                finally
                {
                    var lastSyncedJournal = env.HeaderAccessor.Get(header => header->Journal).LastSyncedJournal;

                    foreach (var jrnl in usedJournals)
                    {
                        if (backupSuccess) // if backup succeeded we can remove journals
                        {
                            if (jrnl.Number < lastWrittenLogFile &&
                                // prevent deletion of the current journal and journals with a greater number
                                jrnl.Number < lastSyncedJournal)
                            // prevent deletion of journals that aren't synced with the data file
                            {
                                jrnl.DeleteOnClose = true;
                            }
                        }

                        jrnl.Release();
                    }
                }
                infoNotify(string.Format("Voron Incr Backup total {0} pages", numberOfBackedUpPages));
            }
            return(numberOfBackedUpPages);
        }
 /// <summary>
 /// Unity lifecycle hook. Delegates to DataCopier.GetAndFill with this component,
 /// its _originalData field and that field's name. NOTE(review): the semantics of
 /// GetAndFill are not visible here — presumably it copies _originalData into this
 /// instance's fields; confirm against the DataCopier implementation.
 /// </summary>
 private void Awake()
 {
     DataCopier.GetAndFill(_originalData, this, "_originalData");
 }
Exemple #13
0
        /// <summary>
        /// Performs the full-backup work for one environment: snapshots the headers,
        /// copies the data file and all live journal files into
        /// <paramref name="package"/> under <paramref name="basePath"/>.
        /// </summary>
        /// <param name="env">The storage environment being backed up.</param>
        /// <param name="compression">Compression level for the created zip entries.</param>
        /// <param name="infoNotify">Progress callback (assumed non-null by this point).</param>
        /// <param name="backupStarted">Optional callback invoked once the snapshot is established.</param>
        /// <param name="dataPager">Pager for the environment's data file.</param>
        /// <param name="package">Destination zip archive.</param>
        /// <param name="basePath">Path prefix for entry names inside the archive.</param>
        /// <param name="copier">Buffered copier used to stream pages.</param>
        private static void Backup(
            StorageEnvironment env, CompressionLevel compression, Action <string> infoNotify,
            Action backupStarted, AbstractPager dataPager, ZipArchive package, string basePath, DataCopier copier)
        {
            var  usedJournals       = new List <JournalFile>();
            long lastWrittenLogPage = -1;
            long lastWrittenLogFile = -1;
            LowLevelTransaction txr = null;

            try
            {
                long allocatedPages;
                var  writePesistentContext = new TransactionPersistentContext(true);
                var  readPesistentContext  = new TransactionPersistentContext(true);
                using (var txw = env.NewLowLevelTransaction(writePesistentContext, TransactionFlags.ReadWrite)) // so we can snapshot the headers safely
                {
                    // The read transaction opened INSIDE the write transaction pins the
                    // snapshot; it is disposed in the outer finally after copying is done.
                    txr            = env.NewLowLevelTransaction(readPesistentContext, TransactionFlags.Read);   // now have snapshot view
                    allocatedPages = dataPager.NumberOfAllocatedPages;

                    Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);
                    infoNotify("Voron copy headers for " + basePath);
                    VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options, basePath);

                    // journal files snapshot
                    var files = env.Journal.Files; // thread safety copy

                    JournalInfo journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);
                    for (var journalNum = journalInfo.CurrentJournal - journalInfo.JournalFilesCount + 1;
                         journalNum <= journalInfo.CurrentJournal;
                         journalNum++)
                    {
                        var journalFile = files.FirstOrDefault(x => x.Number == journalNum);
                        // first check journal files currently being in use
                        if (journalFile == null)
                        {
                            // Journal no longer in the in-memory list: reopen it from disk,
                            // sizing to the next power of two of its allocated pages.
                            long journalSize;
                            using (var pager = env.Options.OpenJournalPager(journalNum))
                            {
                                journalSize = Bits.NextPowerOf2(pager.NumberOfAllocatedPages * env.Options.PageSize);
                            }

                            journalFile = new JournalFile(env, env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                        }

                        // Keep every journal alive until the inner finally releases them.
                        journalFile.AddRef();
                        usedJournals.Add(journalFile);
                    }

                    if (env.Journal.CurrentFile != null)
                    {
                        lastWrittenLogFile = env.Journal.CurrentFile.Number;
                        lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition - 1;
                    }

                    // txw.Commit(); intentionally not committing
                }

                backupStarted?.Invoke();

                // data file backup
                var dataPart = package.CreateEntry(Path.Combine(basePath, Constants.DatabaseFilename), compression);
                Debug.Assert(dataPart != null);

                if (allocatedPages > 0) //only true if dataPager is still empty at backup start
                {
                    using (var dataStream = dataPart.Open())
                    {
                        // now can copy everything else
                        copier.ToStream(dataPager, 0, allocatedPages, dataStream);
                    }
                }

                try
                {
                    foreach (JournalFile journalFile in usedJournals)
                    {
                        var entryName   = Path.Combine(basePath, StorageEnvironmentOptions.JournalName(journalFile.Number));
                        var journalPart = package.CreateEntry(entryName, compression);

                        Debug.Assert(journalPart != null);

                        long pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages;
                        // For the journal currently being written, stop at the write
                        // position snapshotted above rather than the allocated size.
                        if (journalFile.Number == lastWrittenLogFile)
                        {
                            pagesToCopy = lastWrittenLogPage + 1;
                        }

                        using (var stream = journalPart.Open())
                        {
                            copier.ToStream(env, journalFile, 0, pagesToCopy, stream);
                            infoNotify(string.Format("Voron copy journal file {0}", entryName));
                        }
                    }
                }
                finally
                {
                    foreach (var journalFile in usedJournals)
                    {
                        journalFile.Release();
                    }
                }
            }
            finally
            {
                txr?.Dispose();
            }
        }
        /// <summary>
        /// Produces a minimal incremental backup: reads every journal written since the
        /// last backup, keeps only the LAST version of each modified page, merges those
        /// pages into synthetic "merged-journal" transactions, and writes them into a
        /// zip file at <paramref name="backupPath"/>.
        /// </summary>
        /// <param name="env">The storage environment; must have incremental backup enabled.</param>
        /// <param name="backupPath">Path of the zip file to create (overwritten if present).</param>
        /// <param name="compression">Compression level for the created zip entries.</param>
        /// <param name="infoNotify">Optional progress callback; replaced by a no-op when null.</param>
        /// <param name="backupStarted">Optional callback invoked after the journal position snapshot.</param>
        /// <exception cref="InvalidOperationException">
        /// Thrown when incremental backup is disabled, or when pages were found without
        /// any committed transaction in the journals.
        /// </exception>
        public void ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal, Action <string> infoNotify = null,
                           Action backupStarted = null)
        {
            if (env.Options.IncrementalBackupEnabled == false)
            {
                throw new InvalidOperationException("Incremental backup is disabled for this storage");
            }

            // Maps a page number to its latest location in the recovery scratch pager.
            var pageNumberToPageInScratch = new Dictionary <long, long>();

            if (infoNotify == null)
            {
                infoNotify = str => { }
            }
            ;
            var toDispose = new List <IDisposable>();

            try
            {
                IncrementalBackupInfo backupInfo;
                long lastWrittenLogPage = -1;
                long lastWrittenLogFile = -1;

                // Uncommitted write transaction: used only to snapshot the header and
                // the current journal write position.
                using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);

                    if (env.Journal.CurrentFile != null)
                    {
                        lastWrittenLogFile = env.Journal.CurrentFile.Number;
                        lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition;
                    }

                    //txw.Commit(); - intentionally not committing
                }

                if (backupStarted != null)
                {
                    backupStarted();
                }

                infoNotify("Voron - reading storage journals for snapshot pages");

                var lastBackedUpFile     = backupInfo.LastBackedUpJournal;
                var lastBackedUpPage     = backupInfo.LastBackedUpJournalPage;
                var firstJournalToBackup = backupInfo.LastBackedUpJournal;

                if (firstJournalToBackup == -1)
                {
                    firstJournalToBackup = 0;                     // first time that we do incremental backup
                }
                // Sentinel TransactionId == -1 means "no transaction seen yet".
                var lastTransaction = new TransactionHeader {
                    TransactionId = -1
                };

                // Scratch pager that receives decoded journal pages during replay.
                var recoveryPager = env.Options.CreateScratchPager("min-inc-backup.scratch");
                toDispose.Add(recoveryPager);
                int recoveryPage = 0;
                // Phase 1: replay each journal since the last backup, recording the
                // final scratch location of every modified page.
                for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++)
                {
                    lastBackedUpFile = journalNum;
                    var journalFile = IncrementalBackup.GetJournalFile(env, journalNum, backupInfo);
                    try
                    {
                        using (var filePager = env.Options.OpenJournalPager(journalNum))
                        {
                            var reader = new JournalReader(filePager, recoveryPager, 0, null, recoveryPage);
                            reader.MaxPageToRead = lastBackedUpPage = journalFile.JournalWriter.NumberOfAllocatedPages;
                            if (journalNum == lastWrittenLogFile)                             // set the last part of the log file we'll be reading
                            {
                                reader.MaxPageToRead = lastBackedUpPage = lastWrittenLogPage;
                            }

                            if (lastBackedUpPage == journalFile.JournalWriter.NumberOfAllocatedPages)                             // past the file size
                            {
                                // move to the next
                                lastBackedUpPage = -1;
                                lastBackedUpFile++;
                            }

                            if (journalNum == backupInfo.LastBackedUpJournal)                             // continue from last backup
                            {
                                reader.SetStartPage(backupInfo.LastBackedUpJournalPage);
                            }
                            TransactionHeader *lastJournalTxHeader = null;
                            while (reader.ReadOneTransaction(env.Options))
                            {
                                // read all transactions here
                                lastJournalTxHeader = reader.LastTransactionHeader;
                            }

                            if (lastJournalTxHeader != null)
                            {
                                lastTransaction = *lastJournalTxHeader;
                            }

                            recoveryPage = reader.RecoveryPage;

                            foreach (var pagePosition in reader.TransactionPageTranslation)
                            {
                                var pageInJournal = pagePosition.Value.JournalPos;
                                var page          = recoveryPager.Read(pageInJournal);
                                pageNumberToPageInScratch[pagePosition.Key] = pageInJournal;
                                if (page.IsOverflow)
                                {
                                    // Overflow continuation pages travel with their first
                                    // page, so drop any stale standalone entries for them.
                                    var numberOfOverflowPages = recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);
                                    for (int i = 1; i < numberOfOverflowPages; i++)
                                    {
                                        pageNumberToPageInScratch.Remove(page.PageNumber + i);
                                    }
                                }
                            }
                        }
                    }
                    finally
                    {
                        journalFile.Release();
                    }
                }

                if (pageNumberToPageInScratch.Count == 0)
                {
                    infoNotify("Voron - no changes since last backup, nothing to do");
                    return;
                }

                infoNotify("Voron - started writing snapshot file.");

                if (lastTransaction.TransactionId == -1)
                {
                    throw new InvalidOperationException("Could not find any transactions in the journals, but found pages to write? That ain't right.");
                }


                // it is possible that we merged enough transactions so the _merged_ output is too large for us.
                // Voron limit transactions to about 4GB each. That means that we can't just merge all transactions
                // blindly, for fear of hitting this limit. So we need to split things.
                // We are also limited to about 8 TB of data in general before we literally can't fit the number of pages into
                // pageNumberToPageInScratch even theoretically.
                // We're fine with saying that you need to run min inc backup before you hit 8 TB in your increment, so that works for now.
                // We are also going to use env.Options.MaxScratchBufferSize to set the actual transaction limit here, to avoid issues
                // down the road and to limit how big a single transaction can be before the theoretical 4GB limit.

                // Phase 2: write the surviving pages out as one synthetic merged
                // transaction per partition, each in its own zip entry.
                var nextJournalNum = lastBackedUpFile;
                using (var file = new FileStream(backupPath, FileMode.Create))
                {
                    using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
                    {
                        var copier = new DataCopier(AbstractPager.PageSize * 16);

                        var finalPager = env.Options.CreateScratchPager("min-inc-backup-final.scratch");
                        toDispose.Add(finalPager);
                        finalPager.EnsureContinuous(null, 0, 1);                        //txHeader

                        foreach (var partition in Partition(pageNumberToPageInScratch.Values, env.Options.MaxNumberOfPagesInMergedTransaction))
                        {
                            int totalNumberOfPages = 0;
                            int overflowPages      = 0;
                            int start = 1; // page 0 is reserved for the transaction header
                            foreach (var pageNum in partition)
                            {
                                var p    = recoveryPager.Read(pageNum);
                                var size = 1;
                                if (p.IsOverflow)
                                {
                                    size           = recoveryPager.GetNumberOfOverflowPages(p.OverflowSize);
                                    overflowPages += (size - 1);
                                }
                                totalNumberOfPages += size;
                                finalPager.EnsureContinuous(null, start, size);                                 //maybe increase size

                                MemoryUtils.Copy(finalPager.AcquirePagePointer(start), p.Base, size * AbstractPager.PageSize);

                                start += size;
                            }


                            // Build the synthetic transaction header at page 0, copying
                            // bookkeeping fields from the last real transaction seen.
                            var txPage = finalPager.AcquirePagePointer(0);
                            StdLib.memset(txPage, 0, AbstractPager.PageSize);
                            var txHeader = (TransactionHeader *)txPage;
                            txHeader->HeaderMarker           = Constants.TransactionHeaderMarker;
                            txHeader->FreeSpace              = lastTransaction.FreeSpace;
                            txHeader->Root                   = lastTransaction.Root;
                            txHeader->OverflowPageCount      = overflowPages;
                            txHeader->PageCount              = totalNumberOfPages - overflowPages;
                            txHeader->PreviousTransactionCrc = lastTransaction.PreviousTransactionCrc;
                            txHeader->TransactionId          = lastTransaction.TransactionId;
                            txHeader->NextPageNumber         = lastTransaction.NextPageNumber;
                            txHeader->LastPageNumber         = lastTransaction.LastPageNumber;
                            txHeader->TxMarker               = TransactionMarker.Commit | TransactionMarker.Merged;
                            txHeader->Compressed             = false;
                            txHeader->UncompressedSize       = txHeader->CompressedSize = totalNumberOfPages * AbstractPager.PageSize;

                            txHeader->Crc = Crc.Value(finalPager.AcquirePagePointer(1), 0, totalNumberOfPages * AbstractPager.PageSize);


                            var entry = package.CreateEntry(string.Format("{0:D19}.merged-journal", nextJournalNum), compression);
                            nextJournalNum++;
                            using (var stream = entry.Open())
                            {
                                // +1 for the header page at index 0.
                                copier.ToStream(finalPager.AcquirePagePointer(0), (totalNumberOfPages + 1) * AbstractPager.PageSize, stream);
                            }
                        }
                    }
                    file.Flush(true);                    // make sure we hit the disk and stay there
                }

                // Persist the new backup position so the next run resumes from here.
                env.HeaderAccessor.Modify(header =>
                {
                    header->IncrementalBackup.LastBackedUpJournal     = lastBackedUpFile;
                    header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage;
                });
            }
            finally
            {
                foreach (var disposable in toDispose)
                {
                    disposable.Dispose();
                }
            }
        }
Exemple #15
0
 /// <summary>
 /// Moves <paramref name="count"/> items from the source loot slot into the player
 /// inventory, then re-enables the loot item descriptor.
 /// </summary>
 /// <param name="count">Number of items to take; zero or negative values only refresh the descriptor.</param>
 public void applyItemTake(int count)
 {
     if (count > 0)
     {
         var sourceItem = takeFromSlot.item;
         if (sourceItem.quantity == count)
         {
             // Taking the whole stack: drop it from the loot container and
             // move the slot's item into the inventory unchanged.
             container.loot.Remove(sourceItem);
             inventory.addItemToCell(takeFromSlot.takeItem(), null);
         }
         else
         {
             // Partial take: shrink the source stack and give the inventory a
             // freshly instantiated item cloned from the same item data.
             sourceItem.quantity -= count;
             var taken = Instantiate <Transform>(ItemFactory.itemPrefab).GetComponent <Item>().init(DataCopier.copy(sourceItem.itemData));
             taken.quantity = count;
             inventory.addItemToCell(taken, null);
         }
         checkAllTaken();
     }
     itemDescriptor.setEnabled(ItemDescriptor.Type.LOOT, null);
 }
Exemple #16
0
        /// <summary>
        /// Performs an incremental backup of the storage environment into a zip archive at
        /// <paramref name="backupPath"/>: only journal pages written since the previous
        /// incremental backup are copied. On success, fully backed-up journals (below the
        /// current one) are marked for deletion on release.
        /// </summary>
        /// <param name="env">Environment to back up; its options must have IncrementalBackupEnabled set.</param>
        /// <param name="backupPath">Destination archive path; an existing file is overwritten (FileMode.Create).</param>
        /// <param name="compression">Compression level for each journal entry in the archive.</param>
        /// <param name="infoNotify">Optional progress callback; replaced with a no-op when null.</param>
        /// <returns>Total number of journal pages copied into the backup.</returns>
        /// <exception cref="InvalidOperationException">
        /// Incremental backup is disabled, or the first journal file is missing on the very
        /// first incremental backup (storage was initialized without incremental backups).
        /// </exception>
        public long ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal,
                           Action <string> infoNotify = null)
        {
            infoNotify = infoNotify ?? (s => { });

            if (env.Options.IncrementalBackupEnabled == false)
            {
                throw new InvalidOperationException("Incremental backup is disabled for this storage");
            }

            long numberOfBackedUpPages = 0;

            var copier        = new DataCopier(AbstractPager.PageSize * 16);
            var backupSuccess = true;

            long lastWrittenLogPage = -1;
            long lastWrittenLogFile = -1;

            using (var file = new FileStream(backupPath, FileMode.Create))
                using (var package = new ZipArchive(file, ZipArchiveMode.Create))
                {
                    IncrementalBackupInfo backupInfo;
                    // A write transaction is taken (and deliberately never committed) while the
                    // backup position is captured — presumably to hold off concurrent writers so
                    // the header snapshot and the current journal position stay consistent, as in
                    // the full-backup overload's "snapshot the headers safely" comment.
                    // NOTE(review): confirm against StorageEnvironment's locking model.
                    using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
                    {
                        backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);

                        if (env.Journal.CurrentFile != null)
                        {
                            lastWrittenLogFile = env.Journal.CurrentFile.Number;
                            lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition;
                        }

                        // txw.Commit(); intentionally not committing
                    }

                    // Hold a read transaction while copying so we work against a stable snapshot.
                    using (env.NewTransaction(TransactionFlags.Read))
                    {
                        var usedJournals = new List <JournalFile>();

                        try
                        {
                            long lastBackedUpPage = -1;
                            long lastBackedUpFile = -1;

                            var firstJournalToBackup = backupInfo.LastBackedUpJournal;

                            if (firstJournalToBackup == -1)
                            {
                                firstJournalToBackup = 0; // first time that we do incremental backup
                            }
                            for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++)
                            {
                                var num = journalNum;

                                var journalFile = env.Journal.Files.FirstOrDefault(x => x.Number == journalNum); // first check journal files currently being in use
                                if (journalFile == null)
                                {
                                    // Journal not in memory anymore — reopen it from disk to copy from.
                                    long journalSize;
                                    try
                                    {
                                        using (var pager = env.Options.OpenJournalPager(journalNum))
                                        {
                                            journalSize = Utils.NearestPowerOfTwo(pager.NumberOfAllocatedPages * AbstractPager.PageSize);
                                        }
                                    }
                                    catch (Exception e)
                                    {
                                        // A missing journal 0 on the very first backup means the storage was
                                        // initialized without incremental backups — surface a clearer error.
                                        if (backupInfo.LastBackedUpJournal == -1 && journalNum == 0 && e.Message.StartsWith("No such journal"))
                                        {
                                            throw new InvalidOperationException("The first incremental backup creation failed because the first journal file " +
                                                                                StorageEnvironmentOptions.JournalName(journalNum) + " was not found. " +
                                                                                "Did you turn on the incremental backup feature after initializing the storage? " +
                                                                                "In order to create backups incrementally the storage must be created with IncrementalBackupEnabled option set to 'true'.", e);
                                        }

                                        throw;
                                    }

                                    journalFile = new JournalFile(env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                                }

                                journalFile.AddRef();

                                usedJournals.Add(journalFile);

                                // Resume mid-journal if the previous backup stopped inside this file.
                                var startBackupAt = 0L;
                                var pagesToCopy   = journalFile.JournalWriter.NumberOfAllocatedPages;
                                if (journalFile.Number == backupInfo.LastBackedUpJournal)
                                {
                                    startBackupAt = backupInfo.LastBackedUpJournalPage + 1;
                                    pagesToCopy  -= startBackupAt;
                                }

                                if (startBackupAt >= journalFile.JournalWriter.NumberOfAllocatedPages) // nothing to do here
                                {
                                    continue;
                                }

                                var part = package.CreateEntry(StorageEnvironmentOptions.JournalName(journalNum), compression);
                                Debug.Assert(part != null);

                                // For the journal currently being written to, don't copy pages past
                                // the write position captured above (allocated but not yet written).
                                if (journalFile.Number == lastWrittenLogFile)
                                {
                                    pagesToCopy -= (journalFile.JournalWriter.NumberOfAllocatedPages - lastWrittenLogPage);
                                }

                                using (var stream = part.Open())
                                {
                                    copier.ToStream(journalFile, startBackupAt, pagesToCopy, stream);
                                    infoNotify(string.Format("Voron Incr copy journal number {0}", num));
                                }

                                lastBackedUpFile = journalFile.Number;
                                if (journalFile.Number == backupInfo.LastCreatedJournal)
                                {
                                    lastBackedUpPage = startBackupAt + pagesToCopy - 1;
                                    // we used all of this file, so the next backup should start in the next file
                                    if (lastBackedUpPage == (journalFile.JournalWriter.NumberOfAllocatedPages - 1))
                                    {
                                        lastBackedUpPage = -1;
                                        lastBackedUpFile++;
                                    }
                                }

                                numberOfBackedUpPages += pagesToCopy;
                            }

                            //Debug.Assert(lastBackedUpPage != -1);

                            // Persist the new backup position so the next incremental run resumes here.
                            env.HeaderAccessor.Modify(header =>
                            {
                                header->IncrementalBackup.LastBackedUpJournal     = lastBackedUpFile;
                                header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage;
                            });
                        }
                        catch (Exception)
                        {
                            backupSuccess = false;
                            throw;
                        }
                        finally
                        {
                            // Always release the refs taken above; journals are only deleted if the
                            // backup as a whole succeeded.
                            foreach (var jrnl in usedJournals)
                            {
                                if (backupSuccess)                        // if backup succeeded we can remove journals
                                {
                                    if (jrnl.Number < lastWrittenLogFile) // prevent deletion of the current journal and journals with a greater number
                                    {
                                        jrnl.DeleteOnClose = true;
                                    }
                                }

                                jrnl.Release();
                            }
                        }
                        infoNotify(string.Format("Voron Incr Backup total {0} pages", numberOfBackedUpPages));
                        return(numberOfBackedUpPages);
                    }
                }
        }
Exemple #17
0
        /// <summary>
        /// Performs a full backup of the storage environment into a zip archive at
        /// <paramref name="backupPath"/>: the header files, the data file, and every
        /// journal file currently in the journal list.
        /// </summary>
        /// <param name="env">Environment to back up.</param>
        /// <param name="backupPath">Destination archive path; an existing file is overwritten (FileMode.Create).</param>
        /// <param name="compression">Compression level for each archive entry.</param>
        /// <param name="infoNotify">Optional progress callback; replaced with a no-op when null.</param>
        public void ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal,
                           Action <string> infoNotify = null)
        {
            infoNotify = infoNotify ?? (s => { });

            var         dataPager = env.Options.DataPager;
            var         copier    = new DataCopier(AbstractPager.PageSize * 16);
            Transaction txr       = null;

            try
            {
                infoNotify("Voron copy headers");

                using (var file = new FileStream(backupPath, FileMode.Create))
                    using (var package = new ZipArchive(file, ZipArchiveMode.Create))
                    {
                        long allocatedPages;

                        ImmutableAppendOnlyList <JournalFile> files;                // thread safety copy
                        long lastWrittenLogPage = -1;
                        long lastWrittenLogFile = -1;
                        // The write tx is never committed; it exists so the headers, journal list
                        // and current write position are captured atomically. The read tx opened
                        // inside it is held for the whole copy and disposed in the outer finally.
                        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))         // so we can snapshot the headers safely
                        {
                            txr            = env.NewTransaction(TransactionFlags.Read);          // now have snapshot view
                            allocatedPages = dataPager.NumberOfAllocatedPages;

                            Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);

                            VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options);

                            // journal files snapshot; AddRef keeps each file alive until Release below
                            files = env.Journal.Files;

                            foreach (var journalFile in files)
                            {
                                journalFile.AddRef();
                            }

                            if (env.Journal.CurrentFile != null)
                            {
                                lastWrittenLogFile = env.Journal.CurrentFile.Number;
                                lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition - 1;
                            }

                            // txw.Commit(); intentionally not committing
                        }

                        // data file backup
                        var dataPart = package.CreateEntry(Constants.DatabaseFilename, compression);
                        Debug.Assert(dataPart != null);

                        if (allocatedPages > 0)                 // false only if the data file is still empty at backup start
                        {
                            using (var dataStream = dataPart.Open())
                            {
                                // copy the whole data file starting from the base pointer of page 0
                                var firstDataPage = dataPager.Read(0);

                                copier.ToStream(firstDataPage.Base, AbstractPager.PageSize * allocatedPages, dataStream);
                            }
                        }

                        try
                        {
                            foreach (var journalFile in files)
                            {
                                var journalPart = package.CreateEntry(StorageEnvironmentOptions.JournalName(journalFile.Number), compression);

                                Debug.Assert(journalPart != null);

                                // For the journal currently being written, only copy up to the
                                // write position captured in the snapshot above.
                                var pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages;
                                if (journalFile.Number == lastWrittenLogFile)
                                {
                                    pagesToCopy = lastWrittenLogPage + 1;
                                }

                                using (var stream = journalPart.Open())
                                {
                                    copier.ToStream(journalFile, 0, pagesToCopy, stream);
                                    infoNotify(string.Format("Voron copy journal file {0} ", journalFile));
                                }
                            }
                        }
                        finally
                        {
                            foreach (var journalFile in files)
                            {
                                journalFile.Release();
                            }
                        }
                    }
            }
            finally
            {
                if (txr != null)
                {
                    txr.Dispose();
                }
            }
            infoNotify(string.Format("Voron backup db finished"));
        }
        /// <summary>
        /// Performs an incremental backup of the storage environment into a zip archive at
        /// <paramref name="backupPath"/>, copying only journal pages written since the
        /// previous incremental backup. Variant without a progress callback; journals other
        /// than the current one are marked for deletion once the backup succeeds.
        /// </summary>
        /// <param name="env">Environment to back up; its options must have IncrementalBackupEnabled set.</param>
        /// <param name="backupPath">Destination archive path; an existing file is overwritten (FileMode.Create).</param>
        /// <param name="compression">Compression level for each journal entry.</param>
        /// <returns>Total number of journal pages copied into the backup.</returns>
        /// <exception cref="InvalidOperationException">
        /// Incremental backup is disabled, or a recovered journal's size reaches the
        /// configured maximum log file size.
        /// </exception>
        public long ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal)
        {
            if (env.Options.IncrementalBackupEnabled == false)
            {
                throw new InvalidOperationException("Incremental backup is disabled for this storage");
            }

            long numberOfBackedUpPages = 0;

            var copier        = new DataCopier(AbstractPager.PageSize * 16);
            var backupSuccess = true;

            IncrementalBackupInfo backupInfo;
            long lastWrittenLogPage = -1;
            long lastWrittenLogFile = -1;

            // The write tx is never committed; it is used to capture the backup position and
            // current journal write position consistently — see the "snapshot the headers
            // safely" comment in the full-backup overload. NOTE(review): confirm locking model.
            using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);

                if (env.Journal.CurrentFile != null)
                {
                    lastWrittenLogFile = env.Journal.CurrentFile.Number;
                    lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition;
                }

                // txw.Commit(); intentionally not committing
            }

            // Hold a read transaction while copying so we work against a stable snapshot.
            using (env.NewTransaction(TransactionFlags.Read))
            {
                var usedJournals = new List <JournalFile>();

                try
                {
                    using (var file = new FileStream(backupPath, FileMode.Create))
                        using (var package = new ZipArchive(file, ZipArchiveMode.Create))
                        {
                            long lastBackedUpPage = -1;
                            long lastBackedUpFile = -1;

                            var firstJournalToBackup = backupInfo.LastBackedUpJournal;

                            if (firstJournalToBackup == -1)
                            {
                                firstJournalToBackup = 0; // first time that we do incremental backup
                            }
                            for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++)
                            {
                                var journalFile = env.Journal.Files.FirstOrDefault(x => x.Number == journalNum); // first check journal files currently being in use
                                if (journalFile == null)
                                {
                                    // Journal not in memory anymore — reopen it from disk to copy from.
                                    long journalSize;
                                    using (var pager = env.Options.OpenJournalPager(journalNum))
                                    {
                                        journalSize = Utils.NearestPowerOfTwo(pager.NumberOfAllocatedPages * AbstractPager.PageSize);
                                        if (journalSize >= env.Options.MaxLogFileSize) // can't set for more than the max log file size
                                        {
                                            throw new InvalidOperationException("Recovered journal size is " + journalSize +
                                                                                ", while the maximum journal size can be " + env.Options.MaxLogFileSize);
                                        }
                                    }

                                    journalFile = new JournalFile(env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                                }

                                journalFile.AddRef();

                                usedJournals.Add(journalFile);

                                // Resume mid-journal if the previous backup stopped inside this file.
                                var startBackupAt = 0L;
                                var pagesToCopy   = journalFile.JournalWriter.NumberOfAllocatedPages;
                                if (journalFile.Number == backupInfo.LastBackedUpJournal)
                                {
                                    startBackupAt = backupInfo.LastBackedUpJournalPage + 1;
                                    pagesToCopy  -= startBackupAt;
                                }

                                if (startBackupAt >= journalFile.JournalWriter.NumberOfAllocatedPages) // nothing to do here
                                {
                                    continue;
                                }

                                var part = package.CreateEntry(StorageEnvironmentOptions.JournalName(journalNum), compression);
                                Debug.Assert(part != null);

                                // For the journal currently being written to, don't copy pages past
                                // the write position captured above (allocated but not yet written).
                                if (journalFile.Number == lastWrittenLogFile)
                                {
                                    pagesToCopy -= (journalFile.JournalWriter.NumberOfAllocatedPages - lastWrittenLogPage);
                                }

                                using (var stream = part.Open())
                                {
                                    copier.ToStream(journalFile, startBackupAt, pagesToCopy, stream);
                                }

                                lastBackedUpFile = journalFile.Number;
                                if (journalFile.Number == backupInfo.LastCreatedJournal)
                                {
                                    lastBackedUpPage = startBackupAt + pagesToCopy - 1;
                                    // we used all of this file, so the next backup should start in the next file
                                    if (lastBackedUpPage == (journalFile.JournalWriter.NumberOfAllocatedPages - 1))
                                    {
                                        lastBackedUpPage = -1;
                                        lastBackedUpFile++;
                                    }
                                }

                                numberOfBackedUpPages += pagesToCopy;
                            }

                            //Debug.Assert(lastBackedUpPage != -1);

                            // Persist the new backup position so the next incremental run resumes here.
                            env.HeaderAccessor.Modify(header =>
                            {
                                header->IncrementalBackup.LastBackedUpJournal     = lastBackedUpFile;
                                header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage;
                            });
                        }
                }
                catch (Exception)
                {
                    backupSuccess = false;
                    throw;
                }
                finally
                {
                    // Always release the refs taken above; journals are only deleted if the
                    // backup as a whole succeeded.
                    foreach (var file in usedJournals)
                    {
                        if (backupSuccess)                         // if backup succeeded we can remove journals
                        {
                            if (file.Number != lastWrittenLogFile) // prevent deletion of the current journal
                            {
                                file.DeleteOnClose = true;
                            }
                        }

                        file.Release();
                    }
                }

                return(numberOfBackedUpPages);
            }
        }
Exemple #19
0
        /// <summary>
        /// Performs a full backup of the storage environment into a zip archive at
        /// <paramref name="backupPath"/>: the header files, the data file, and every journal
        /// listed in the header's journal info. Variant with a <paramref name="backupStarted"/>
        /// callback that fires after the snapshot is taken, before any data is copied.
        /// </summary>
        /// <param name="env">Environment to back up.</param>
        /// <param name="backupPath">Destination archive path; an existing file is overwritten (FileMode.Create).</param>
        /// <param name="compression">Compression level for each archive entry.</param>
        /// <param name="infoNotify">Optional progress callback; replaced with a no-op when null.</param>
        /// <param name="backupStarted">Optional callback invoked once the snapshot phase completes.</param>
        public void ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal,
                           Action <string> infoNotify = null,
                           Action backupStarted       = null)
        {
            infoNotify = infoNotify ?? (s => { });

            var         dataPager = env.Options.DataPager;
            var         copier    = new DataCopier(AbstractPager.PageSize * 16);
            Transaction txr       = null;

            try
            {
                infoNotify("Voron copy headers");

                using (var file = new FileStream(backupPath, FileMode.Create))
                {
                    // leaveOpen: true so file.Flush(true) can run after the archive is
                    // finalized, forcing the finished zip to physical disk.
                    using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
                    {
                        long allocatedPages;

                        ImmutableAppendOnlyList <JournalFile> files; // thread safety copy
                        var  usedJournals       = new List <JournalFile>();
                        long lastWrittenLogPage = -1;
                        long lastWrittenLogFile = -1;
                        // The write tx is never committed; it exists so the headers, journal list
                        // and current write position are captured atomically. The read tx opened
                        // inside it is held for the whole copy and disposed in the outer finally.
                        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) // so we can snapshot the headers safely
                        {
                            txr            = env.NewTransaction(TransactionFlags.Read);  // now have snapshot view
                            allocatedPages = dataPager.NumberOfAllocatedPages;

                            Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);

                            VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options);

                            // journal files snapshot
                            files = env.Journal.Files;

                            // Walk every journal recorded in the header; reopen from disk any
                            // journal that is no longer in the in-memory list.
                            JournalInfo journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);
                            for (var journalNum = journalInfo.CurrentJournal - journalInfo.JournalFilesCount + 1; journalNum <= journalInfo.CurrentJournal; journalNum++)
                            {
                                var journalFile = files.FirstOrDefault(x => x.Number == journalNum); // first check journal files currently being in use
                                if (journalFile == null)
                                {
                                    long journalSize;
                                    using (var pager = env.Options.OpenJournalPager(journalNum))
                                    {
                                        journalSize = Utils.NearestPowerOfTwo(pager.NumberOfAllocatedPages * AbstractPager.PageSize);
                                    }

                                    journalFile = new JournalFile(env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                                }

                                journalFile.AddRef();
                                usedJournals.Add(journalFile);
                            }

                            if (env.Journal.CurrentFile != null)
                            {
                                lastWrittenLogFile = env.Journal.CurrentFile.Number;
                                lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition - 1;
                            }

                            // txw.Commit(); intentionally not committing
                        }

                        if (backupStarted != null)
                        {
                            backupStarted();
                        }

                        // data file backup
                        var dataPart = package.CreateEntry(Constants.DatabaseFilename, compression);
                        Debug.Assert(dataPart != null);

                        if (allocatedPages > 0) // false only if the data file is still empty at backup start
                        {
                            using (var dataStream = dataPart.Open())
                            {
                                // copy the whole data file starting from the base pointer of page 0
                                var firstDataPage = dataPager.Read(null, 0);

                                copier.ToStream(firstDataPage.Base, AbstractPager.PageSize * allocatedPages, dataStream);
                            }
                        }

                        try
                        {
                            foreach (var journalFile in usedJournals)
                            {
                                var journalPart = package.CreateEntry(StorageEnvironmentOptions.JournalName(journalFile.Number), compression);

                                Debug.Assert(journalPart != null);

                                // For the journal currently being written, only copy up to the
                                // write position captured in the snapshot above.
                                var pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages;
                                if (journalFile.Number == lastWrittenLogFile)
                                {
                                    pagesToCopy = lastWrittenLogPage + 1;
                                }

                                using (var stream = journalPart.Open())
                                {
                                    copier.ToStream(journalFile, 0, pagesToCopy, stream);
                                    infoNotify(string.Format("Voron copy journal file {0} ", journalFile));
                                }
                            }
                        }
                        finally
                        {
                            foreach (var journalFile in usedJournals)
                            {
                                journalFile.Release();
                            }
                        }
                    }
                    file.Flush(true); // make sure that we fully flushed to disk
                }
            }
            finally
            {
                if (txr != null)
                {
                    txr.Dispose();
                }
            }
            infoNotify(string.Format("Voron backup db finished"));
        }
Exemple #20
0
        private static void Backup(
            StorageEnvironment env, CompressionLevel compression, Action <string> infoNotify,
            Action backupStarted, AbstractPager dataPager, ZipArchive package, string basePath, DataCopier copier)
        {
            var  usedJournals       = new List <JournalFile>();
            long lastWrittenLogPage = -1;
            long lastWrittenLogFile = -1;
            LowLevelTransaction txr = null;
            var backupSuccess       = false;

            try
            {
                long allocatedPages;
                var  writePesistentContext = new TransactionPersistentContext(true);
                var  readPesistentContext  = new TransactionPersistentContext(true);
                using (var txw = env.NewLowLevelTransaction(writePesistentContext, TransactionFlags.ReadWrite)) // so we can snapshot the headers safely
                {
                    txr            = env.NewLowLevelTransaction(readPesistentContext, TransactionFlags.Read);   // now have snapshot view
                    allocatedPages = dataPager.NumberOfAllocatedPages;

                    Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);
                    infoNotify("Voron copy headers for " + basePath);
                    VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options, basePath);

                    // journal files snapshot
                    var files = env.Journal.Files; // thread safety copy

                    JournalInfo journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);
                    for (var journalNum = journalInfo.CurrentJournal - journalInfo.JournalFilesCount + 1;
                         journalNum <= journalInfo.CurrentJournal;
                         journalNum++)
                    {
                        var journalFile = files.FirstOrDefault(x => x.Number == journalNum);
                        // first check journal files currently being in use
                        if (journalFile == null)
                        {
                            long journalSize;
                            using (var pager = env.Options.OpenJournalPager(journalNum))
                            {
                                journalSize = Bits.NextPowerOf2(pager.NumberOfAllocatedPages * Constants.Storage.PageSize);
                            }

                            journalFile = new JournalFile(env, env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                        }

                        journalFile.AddRef();
                        usedJournals.Add(journalFile);
                    }

                    if (env.Journal.CurrentFile != null)
                    {
                        lastWrittenLogFile = env.Journal.CurrentFile.Number;
                        lastWrittenLogPage = env.Journal.CurrentFile.WritePosIn4KbPosition - 1;
                    }

                    // txw.Commit(); intentionally not committing
                }

                backupStarted?.Invoke();

                // data file backup
                var dataPart = package.CreateEntry(Path.Combine(basePath, Constants.DatabaseFilename), compression);
                Debug.Assert(dataPart != null);

                if (allocatedPages > 0) //only true if dataPager is still empty at backup start
                {
                    using (var dataStream = dataPart.Open())
                    {
                        // now can copy everything else
                        copier.ToStream(dataPager, 0, allocatedPages, dataStream);
                    }
                }

                try
                {
                    long lastBackedupJournal = 0;
                    foreach (var journalFile in usedJournals)
                    {
                        var entryName   = StorageEnvironmentOptions.JournalName(journalFile.Number);
                        var journalPart = package.CreateEntry(Path.Combine(basePath, entryName), compression);

                        Debug.Assert(journalPart != null);

                        long pagesToCopy = journalFile.JournalWriter.NumberOfAllocated4Kb;
                        if (journalFile.Number == lastWrittenLogFile)
                        {
                            pagesToCopy = lastWrittenLogPage + 1;
                        }

                        using (var stream = journalPart.Open())
                        {
                            copier.ToStream(env, journalFile, 0, pagesToCopy, stream);
                            infoNotify(string.Format("Voron copy journal file {0}", entryName));
                        }

                        lastBackedupJournal = journalFile.Number;
                    }

                    if (env.Options.IncrementalBackupEnabled)
                    {
                        env.HeaderAccessor.Modify(header =>
                        {
                            header->IncrementalBackup.LastBackedUpJournal = lastBackedupJournal;

                            //since we backed-up everything, no need to start next incremental backup from the middle
                            header->IncrementalBackup.LastBackedUpJournalPage = -1;
                        });
                    }
                    backupSuccess = true;
                }
                catch (Exception)
                {
                    backupSuccess = false;
                    throw;
                }
                finally
                {
                    var lastSyncedJournal = env.HeaderAccessor.Get(header => header->Journal).LastSyncedJournal;
                    foreach (var journalFile in usedJournals)
                    {
                        if (backupSuccess)                                 // if backup succeeded we can remove journals
                        {
                            if (journalFile.Number < lastWrittenLogFile && // prevent deletion of the current journal and journals with a greater number
                                journalFile.Number < lastSyncedJournal)    // prevent deletion of journals that aren't synced with the data file
                            {
                                journalFile.DeleteOnClose = true;
                            }
                        }

                        journalFile.Release();
                    }
                }
            }
            finally
            {
                txr?.Dispose();
            }
        }
Exemple #21
0
        /// <summary>
        /// Writes every header file that can be read into the backup package under
        /// <paramref name="basePath"/>, then returns the journal info from the in-memory header.
        /// </summary>
        /// <exception cref="InvalidDataException">Neither header file could be read.</exception>
        public JournalInfo CopyHeaders(CompressionLevel compressionLevel, ZipArchive package, DataCopier copier, StorageEnvironmentOptions envOptions, string basePath)
        {
            // Take the read lock so nobody modifies the headers while we are copying them.
            _locker.EnterReadLock();
            try
            {
                var headerBuffer  = stackalloc FileHeader[1];
                var anyHeaderRead = false;

                foreach (var fileName in HeaderFileNames)
                {
                    if (envOptions.ReadHeader(fileName, headerBuffer) == false)
                    {
                        // This copy is unreadable - the sibling header file may still be fine.
                        continue;
                    }

                    anyHeaderRead = true;

                    var entry = package.CreateEntry(Path.Combine(basePath, fileName), compressionLevel);
                    Debug.Assert(entry != null);

                    using (var entryStream = entry.Open())
                    {
                        copier.ToStream((byte *)headerBuffer, sizeof(FileHeader), entryStream);
                    }
                }

                if (anyHeaderRead == false)
                {
                    throw new InvalidDataException($"Failed to read both file headers (headers.one & headers.two) from path: {basePath}, possible corruption.");
                }

                return _theHeader->Journal;
            }
            finally
            {
                _locker.ExitReadLock();
            }
        }
Exemple #22
0
        /// <summary>
        /// Copies each readable environment header file into the zip package under
        /// <paramref name="basePath"/>; throws when neither header could be read.
        /// </summary>
        /// <exception cref="InvalidDataException">Neither header file could be read.</exception>
        internal static void CopyHeaders(CompressionLevel compression, ZipArchive package, DataCopier copier, StorageEnvironmentOptions storageEnvironmentOptions, string basePath)
        {
            // One scratch buffer is enough: ReadHeader overwrites it on every iteration.
            var header         = stackalloc FileHeader[1];
            var readAtLeastOne = false;

            foreach (var name in HeaderAccessor.HeaderFileNames)
            {
                if (storageEnvironmentOptions.ReadHeader(name, header) == false)
                {
                    // Unreadable copy - the other header file may still be intact.
                    continue;
                }

                readAtLeastOne = true;

                var entry = package.CreateEntry(Path.Combine(basePath, name), compression);
                Debug.Assert(entry != null);

                using (var entryStream = entry.Open())
                {
                    copier.ToStream((byte *)header, sizeof(FileHeader), entryStream);
                }
            }

            if (readAtLeastOne == false)
            {
                throw new InvalidDataException($"Failed to read both file headers (headers.one & headers.two) from path: {basePath}, possible corruption.");
            }
        }
Exemple #23
0
 /// <summary>Creates a fresh <see cref="DataCopier"/> instance before each test.</summary>
 public void SetUp() => _unitUnderTest = new DataCopier();
Exemple #24
0
        /// <summary>
        /// Performs an incremental backup of <paramref name="env"/> into a zip file at
        /// <paramref name="backupPath"/>: copies the journal pages written since the last
        /// incremental backup, then records the new backup position in the header.
        /// </summary>
        /// <param name="env">Storage environment to back up; must have incremental backup enabled.</param>
        /// <param name="backupPath">Destination zip file path (created/overwritten).</param>
        /// <param name="compression">Compression level for the zip entries.</param>
        /// <param name="infoNotify">Optional progress callback; defaults to a no-op.</param>
        /// <param name="backupStarted">Optional callback invoked once the read transaction is open.</param>
        /// <returns>The number of journal pages copied into the backup.</returns>
        /// <exception cref="InvalidOperationException">Incremental backup is disabled for this storage.</exception>
        public long ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal,
                           Action <string> infoNotify = null,
                           Action backupStarted       = null)
        {
            infoNotify = infoNotify ?? (s => { });

            if (env.Options.IncrementalBackupEnabled == false)
            {
                throw new InvalidOperationException("Incremental backup is disabled for this storage");
            }

            long numberOfBackedUpPages = 0;

            var copier        = new DataCopier(env.Options.PageSize * 16);
            var backupSuccess = true;

            // Position of the last page written in the currently active journal; -1 means
            // there is no active journal file at backup time.
            long lastWrittenLogPage = -1;
            long lastWrittenLogFile = -1;

            using (var file = new FileStream(backupPath, FileMode.Create))
            {
                using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
                {
                    IncrementalBackupInfo backupInfo;
                    // NOTE(review): a ReadWrite transaction is opened but never committed -
                    // presumably to block concurrent writers while we snapshot the backup
                    // info and the current journal position; confirm against StorageEnvironment.
                    using (var txw = env.NewLowLevelTransaction(TransactionFlags.ReadWrite))
                    {
                        backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);

                        if (env.Journal.CurrentFile != null)
                        {
                            lastWrittenLogFile = env.Journal.CurrentFile.Number;
                            lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition;
                        }

                        // txw.Commit(); intentionally not committing
                    }

                    using (env.NewLowLevelTransaction(TransactionFlags.Read))
                    {
                        if (backupStarted != null)
                        {
                            backupStarted();// we let call know that we have started the backup
                        }
                        var usedJournals = new List <JournalFile>();

                        try
                        {
                            // Position to record in the header when the backup completes.
                            long lastBackedUpPage = -1;
                            long lastBackedUpFile = -1;

                            var firstJournalToBackup = backupInfo.LastBackedUpJournal;

                            if (firstJournalToBackup == -1)
                            {
                                firstJournalToBackup = 0; // first time that we do incremental backup
                            }
                            for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++)
                            {
                                var num = journalNum;

                                var journalFile = GetJournalFile(env, journalNum, backupInfo);

                                // Keep the journal alive (and released in the finally block)
                                // even if the environment rolls it over during the backup.
                                journalFile.AddRef();

                                usedJournals.Add(journalFile);

                                var  startBackupAt = 0L;
                                long pagesToCopy   = journalFile.JournalWriter.NumberOfAllocatedPages;
                                // The journal we stopped at last time is only partially new:
                                // resume right after the last page we already backed up.
                                if (journalFile.Number == backupInfo.LastBackedUpJournal)
                                {
                                    startBackupAt = backupInfo.LastBackedUpJournalPage + 1;
                                    pagesToCopy  -= startBackupAt;
                                }

                                if (startBackupAt >= journalFile.JournalWriter.NumberOfAllocatedPages) // nothing to do here
                                {
                                    continue;
                                }

                                var part = package.CreateEntry(StorageEnvironmentOptions.JournalName(journalNum), compression);
                                Debug.Assert(part != null);

                                // For the active journal, only copy up to the snapshotted write
                                // position - pages past it were not yet written at backup start.
                                if (journalFile.Number == lastWrittenLogFile)
                                {
                                    pagesToCopy -= (journalFile.JournalWriter.NumberOfAllocatedPages - lastWrittenLogPage);
                                }

                                using (var stream = part.Open())
                                {
                                    copier.ToStream(env, journalFile, startBackupAt, pagesToCopy, stream);
                                    infoNotify(string.Format("Voron Incr copy journal number {0}", num));
                                }

                                lastBackedUpFile = journalFile.Number;
                                if (journalFile.Number == backupInfo.LastCreatedJournal)
                                {
                                    lastBackedUpPage = startBackupAt + pagesToCopy - 1;
                                    // we used all of this file, so the next backup should start in the next file
                                    if (lastBackedUpPage == (journalFile.JournalWriter.NumberOfAllocatedPages - 1))
                                    {
                                        lastBackedUpPage = -1;
                                        lastBackedUpFile++;
                                    }
                                }

                                numberOfBackedUpPages += pagesToCopy;
                            }


                            // Persist the new backup position so the next incremental backup
                            // resumes where this one stopped.
                            env.HeaderAccessor.Modify(header =>
                            {
                                header->IncrementalBackup.LastBackedUpJournal     = lastBackedUpFile;
                                header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage;
                            });
                        }
                        catch (Exception)
                        {
                            backupSuccess = false;
                            throw;
                        }
                        finally
                        {
                            var lastSyncedJournal = env.HeaderAccessor.Get(header => header->Journal).LastSyncedJournal;

                            // Always release the refs taken above; journals that are fully
                            // backed up AND synced to the data file may also be deleted.
                            foreach (var jrnl in usedJournals)
                            {
                                if (backupSuccess)                          // if backup succeeded we can remove journals
                                {
                                    if (jrnl.Number < lastWrittenLogFile && // prevent deletion of the current journal and journals with a greater number
                                        jrnl.Number < lastSyncedJournal)    // prevent deletion of journals that aren't synced with the data file
                                    {
                                        jrnl.DeleteOnClose = true;
                                    }
                                }

                                jrnl.Release();
                            }
                        }
                        infoNotify(string.Format("Voron Incr Backup total {0} pages", numberOfBackedUpPages));
                    }
                }
                file.Flush(true); // make sure that this is actually persisted fully to disk
                return(numberOfBackedUpPages);
            }
        }
Exemple #25
0
        /// <summary>
        /// Copies each readable environment header file into the root of the zip package.
        /// </summary>
        /// <exception cref="InvalidDataException">Neither header file could be read.</exception>
        internal static void CopyHeaders(CompressionLevel compression, ZipArchive package, DataCopier copier, StorageEnvironmentOptions storageEnvironmentOptions)
        {
            var success = false;

            foreach (var headerFileName in HeaderAccessor.HeaderFileNames)
            {
                var header = stackalloc FileHeader[1];

                if (!storageEnvironmentOptions.ReadHeader(headerFileName, header))
                {
                    // A single unreadable header is tolerated - the other copy may be intact.
                    continue;
                }

                success = true;

                var headerPart = package.CreateEntry(headerFileName, compression);
                Debug.Assert(headerPart != null);

                using (var headerStream = headerPart.Open())
                {
                    copier.ToStream((byte *)header, sizeof(FileHeader), headerStream, CancellationToken.None);
                }
            }

            // Consistent with the other CopyHeaders overloads: a backup that contains no
            // header at all would be unrestorable, so fail loudly instead of writing it.
            if (!success)
            {
                throw new InvalidDataException("Failed to read both file headers (headers.one & headers.two), possible corruption.");
            }
        }