Example #1
        public BackendManager(string backendurl, Options options, IBackendWriter statwriter, LocalDatabase database)
        {
            m_options = options;
            m_backendurl = backendurl;
            m_statwriter = statwriter;
            m_taskControl = statwriter as BasicResults;
            m_db = new DatabaseCollector(database, statwriter);

            m_backend = DynamicLoader.BackendLoader.GetBackend(m_backendurl, m_options.RawOptions);
            if (m_backend == null)
                throw new Exception(string.Format("Backend not supported: {0}", m_backendurl));

            if (!m_options.NoEncryption)
            {
                m_encryption = DynamicLoader.EncryptionLoader.GetModule(m_options.EncryptionModule, m_options.Passphrase, m_options.RawOptions);
                if (m_encryption == null)
                    throw new Exception(string.Format("Encryption method not supported: ", m_options.EncryptionModule));
            }

            if (m_taskControl != null)
                m_taskControl.StateChangedEvent += (state) => {
                    if (state == TaskControlState.Abort)
                        m_thread.Abort();
                };
            m_queue = new BlockingQueue<FileEntryItem>(options.SynchronousUpload ? 1 : (options.AsynchronousUploadLimit == 0 ? int.MaxValue : options.AsynchronousUploadLimit));
            m_thread = new System.Threading.Thread(this.ThreadRun);
            m_thread.Name = "Backend Async Worker";
            m_thread.IsBackground = true;
            m_thread.Start();
        }
Example #2
        public static LocalDatabase RunDatabaseMigrationTests()
        {
            var database = new LocalDatabase();

            var versionUpdateCount = 0;
            var versionChecker = new Mock<IVersionChecker>();
            versionChecker
                .Setup(vc => vc.VersionExists(It.IsAny<string>(), It.IsAny<long>()))
                .Returns<string, long>((s, l) => false);
            versionChecker
                .Setup(vc => vc.AddVersion(It.IsAny<string>(), It.IsAny<long>()))
                .Callback<string, long>((s, l) => versionUpdateCount++);
            var descriptors = new List<ModuleDescriptor> { new SampleModuleDescriptor() };

            database.RunMigrations(descriptors, versionChecker.Object);

            // Should run 2 migration scripts
            Assert.AreEqual(2, versionUpdateCount);

            // Check that the tables were created
            CheckIfTablesExist(database.SqlConnection);

            // Shouldn't run the same migrations a second time
            versionUpdateCount = 0;
            versionChecker
                .Setup(vc => vc.VersionExists(It.IsAny<string>(), It.IsAny<long>()))
                .Returns<string, long>((s, l) => true);

            database.RunMigrations(descriptors, versionChecker.Object);

            // Should run 0 migration scripts
            Assert.AreEqual(0, versionUpdateCount);

            return database;
        }
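
The Moq setups above imply the following checker contract; the interface shape is inferred from the test itself, and the parameter names are assumptions rather than taken from the source:

        public interface IVersionChecker
        {
            // Returns true when the given migration version is already applied, so that migration is skipped.
            bool VersionExists(string module, long version);

            // Records that a migration version has been applied.
            void AddVersion(string module, long version);
        }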
Example #3
        /// <summary>
        /// Helper method that checks the local database for volumes in transient states
        /// (temporary, uploading, or deleting) and removes the corresponding remote files.
        /// </summary>
        /// <param name="backend">The backend instance to use</param>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        /// <param name="log">The log instance to use</param>
        public static void VerifyLocalList(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
        {
            var locallist = database.GetRemoteVolumes();
            foreach(var i in locallist)
            {
                switch (i.State)
                {
                    case RemoteVolumeState.Uploaded:
                    case RemoteVolumeState.Verified:
                    case RemoteVolumeState.Deleted:
                        break;

                    case RemoteVolumeState.Temporary:
                    case RemoteVolumeState.Deleting:
                    case RemoteVolumeState.Uploading:
                        log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                        try
                        {
                            backend.Delete(i.Name, i.Size, true);
                        }
                        catch (Exception ex)
                        {
                            log.AddWarning(string.Format("Failed to erase file {0}, treating as deleted: {1}", i.Name, ex.Message), ex);
                        }

                        break;

                    default:
                        log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null);
                        break;
                }

                backend.FlushDbMessages();
            }
        }
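
A hypothetical call site for the method above, shown only to illustrate the intended use; the containing class name is an assumption, not taken from the excerpt:

        // Drops leftovers recorded as Temporary/Deleting/Uploading before a new operation starts;
        // entries in the Uploaded/Verified/Deleted states are left untouched.
        FilelistProcessor.VerifyLocalList(backend, options, database, log);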
Example #4
        /// <summary>
        /// Helper method that verifies uploaded volumes and updates their state in the database.
        /// Throws an error if there are issues with the remote storage
        /// </summary>
        /// <param name="backend">The backend instance to use</param>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        /// <param name="log">The log instance to use</param>
        public static void VerifyRemoteList(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
        {
            var tp = RemoteListAnalysis(backend, options, database, log);
            long extraCount = 0;
            long missingCount = 0;

            foreach(var n in tp.ExtraVolumes)
            {
                log.AddWarning(string.Format("Extra unknown file: {0}", n.File.Name), null);
                extraCount++;
            }

            foreach(var n in tp.MissingVolumes)
            {
                log.AddWarning(string.Format("Missing file: {0}", n.Name), null);
                missingCount++;
            }

            if (extraCount > 0)
            {
                var s = string.Format("Found {0} remote files that are not recorded in local storage, please run repair", extraCount);
                log.AddError(s, null);
                throw new Exception(s);
            }

            var lookup = new Dictionary<string, string>();
            var doubles = new Dictionary<string, string>();
            foreach(var v in tp.ParsedVolumes)
            {
                if (lookup.ContainsKey(v.File.Name))
                    doubles[v.File.Name] = null;
                else
                    lookup[v.File.Name] = null;
            }

            if (doubles.Count > 0)
            {
                var s = string.Format("Found remote files reported as duplicates, either the backend module is broken or you need to manually remove the extra copies.\nThe following files were found multiple times: {0}", string.Join(", ", doubles.Keys));
                log.AddError(s, null);
                throw new Exception(s);
            }

            if (missingCount > 0)
            {
                string s;
                if (!tp.BackupPrefixes.Contains(options.Prefix) && tp.BackupPrefixes.Length > 0)
                    s = string.Format("Found {0} files that are missing from the remote storage, and no files with the backup prefix {1}, but found the following backup prefixes: {2}", missingCount, options.Prefix, string.Join(", ", tp.BackupPrefixes));
                else
                    s = string.Format("Found {0} files that are missing from the remote storage, please run repair", missingCount);

                log.AddError(s, null);
                throw new Exception(s);
            }
        }
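
The duplicate scan in the method above uses two dictionaries with null values as makeshift sets; a behaviorally equivalent sketch using HashSet, shown purely as an illustration of the check:

            var seen = new HashSet<string>();
            var doubles = new HashSet<string>();
            foreach (var v in tp.ParsedVolumes)
                if (!seen.Add(v.File.Name)) // Add returns false when the name was already present
                    doubles.Add(v.File.Name);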
Example #5
        internal static void UpdateOptionsFromDb(LocalDatabase db, Options options, System.Data.IDbTransaction transaction = null)
        {
            string n = null;
            var opts = db.GetDbOptions(transaction);
            if(opts.ContainsKey("blocksize") && (!options.RawOptions.TryGetValue("blocksize", out n) || string.IsNullOrEmpty(n)))
                options.RawOptions["blocksize"] = opts["blocksize"] + "b";

            if (opts.ContainsKey("blockhash") && (!options.RawOptions.TryGetValue("block-hash-algorithm", out n) || string.IsNullOrEmpty(n)))
                options.RawOptions["block-hash-algorithm"] = opts["blockhash"];
            if (opts.ContainsKey("filehash") && (!options.RawOptions.TryGetValue("file-hash-algorithm", out n) || string.IsNullOrEmpty(n)))
                options.RawOptions["file-hash-algorithm"] = opts["filehash"];
        }
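
A minimal sketch of what UpdateOptionsFromDb does, assuming the database stores blocksize=102400 and that Options has a constructor taking a raw option dictionary (both are assumptions for illustration):

        var options = new Options(new Dictionary<string, string>()); // user supplied no --blocksize
        UpdateOptionsFromDb(db, options);
        // options.RawOptions["blocksize"] is now "102400b": the stored value plus a byte suffix,
        // applied only because the user did not provide a value of their own.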
        /// <summary>
        /// Run the recreate procedure
        /// </summary>
        /// <param name="path">Path to the database that will be created</param>
        /// <param name="filelistfilter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param>
        /// <param name="filenamefilter">Filters the files in a filelist to prevent downloading unwanted data</param>
        /// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be use to recover data blocks while processing blocklists</param>
        public void Run(string path, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null)
        {
            if (System.IO.File.Exists(path))
                throw new Exception(string.Format("Cannot recreate database because file already exists: {0}", path));

            using(var db = new LocalDatabase(path, "Recreate"))
            {
                m_result.SetDatabase(db);
                DoRun(db, filter, filelistfilter, blockprocessor);
                db.WriteResults();
            }
        }
        /// <summary>
        /// Updates a database with new path information from a remote fileset
        /// </summary>
        /// <param name="filelistfilter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param>
        /// <param name="filter">Filters the files in a filelist to prevent downloading unwanted data</param>
        /// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be use to recover data blocks while processing blocklists</param>
        public void RunUpdate(Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null)
        {
            if (!m_options.RepairOnlyPaths)
                throw new Exception(string.Format("Can only update with paths, try setting {0}", "--repair-only-paths"));
            
            using(var db = new LocalDatabase(m_options.Dbpath, "Recreate"))
            {
                m_result.SetDatabase(db);

                if (db.FindMatchingFilesets(m_options.Time, m_options.Version).Any())
                    throw new Exception(string.Format("The version(s) being updated to, already exists"));

                Utility.UpdateOptionsFromDb(db, m_options, null);
                DoRun(db, true, filter, filelistfilter, blockprocessor);
                db.WriteResults();
            }
        }
Example #8
        /// <summary>
        /// Helper method that verifies uploaded volumes and updates their state in the database.
        /// Throws an error if there are issues with the remote storage
        /// </summary>
        /// <param name="backend">The backend instance to use</param>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        public static void VerifyRemoteList(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
        {
            var tp = RemoteListAnalysis(backend, options, database, log);
            long extraCount = 0;
            long missingCount = 0;

            foreach(var n in tp.ExtraVolumes)
            {
                log.AddWarning(string.Format("Extra unknown file: {0}", n.File.Name), null);
                extraCount++;
            }

            foreach(var n in tp.MissingVolumes)
            {
                log.AddWarning(string.Format("Missing file: {0}", n.Name), null);
                missingCount++;
            }

            if (extraCount > 0)
            {
                var s = string.Format("Found {0} remote files that are not recorded in local storage, please run repair", extraCount);
                log.AddError(s, null);
                throw new Exception(s);
            }

            if (missingCount > 0)
            {
                string s;
                if (!tp.BackupPrefixes.Contains(options.Prefix) && tp.BackupPrefixes.Length > 0)
                    s = string.Format("Found {0} files that are missing from the remote storage, and no files with the backup prefix {1}, but found the following backup prefixes: {2}", missingCount, options.Prefix, string.Join(", ", tp.BackupPrefixes));
                else
                    s = string.Format("Found {0} files that are missing from the remote storage, please run repair", missingCount);

                log.AddError(s, null);
                throw new Exception(s);
            }
        }
Example #9
        /// <summary>
        /// Run the recreate procedure
        /// </summary>
        /// <param name="dbparent">The database to restore into</param>
        /// <param name="updating">True if this is an update call, false otherwise</param>
        /// <param name="filter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param>
        /// <param name="filelistfilter">Filters the files in a filelist to prevent downloading unwanted data</param>
        /// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be use to recover data blocks while processing blocklists</param>
        internal void DoRun(LocalDatabase dbparent, bool updating, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Recreate_Running);

            //We build a local database in steps.
            using (var restoredb = new LocalRecreateDatabase(dbparent, m_options))
                using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, restoredb))
                {
                    restoredb.RepairInProgress = true;
                    var autoDetectBlockSize = !(m_options.HasBlocksize && restoredb.GetDbOptions().ContainsKey("blocksize"));
                    var volumeIds           = new Dictionary <string, long>();

                    var rawlist = backend.List();

                    //First step is to examine the remote storage to see what
                    // kind of data we can find
                    var remotefiles =
                        (from x in rawlist
                         let n = VolumeBase.ParseFilename(x)
                         where n != null && n.Prefix == m_options.Prefix
                         select n).ToArray(); //ToArray() ensures that we do not remote-request it multiple times

                    if (remotefiles.Length == 0)
                    {
                        if (rawlist.Count == 0)
                        {
                            throw new UserInformationException("No files were found at the remote location, perhaps the target url is incorrect?", "EmptyRemoteLocation");
                        }
                        else
                        {
                            var tmp =
                                (from x in rawlist
                                 let n = VolumeBase.ParseFilename(x)
                                 where n != null
                                 select n.Prefix).ToArray();

                            var types = tmp.Distinct().ToArray();
                            if (tmp.Length == 0)
                            {
                                throw new UserInformationException(string.Format("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count), "EmptyRemoteLocation");
                            }
                            else if (types.Length == 1)
                            {
                                throw new UserInformationException(string.Format("Found {0} parse-able files with the prefix {1}, did you forget to set the backup prefix?", tmp.Length, types[0]), "EmptyRemoteLocationWithPrefix");
                            }
                            else
                            {
                                throw new UserInformationException(string.Format("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup prefix?", tmp.Length, rawlist.Count, string.Join(", ", types)), "EmptyRemoteLocationWithPrefix");
                            }
                        }
                    }

                    //Then we select the filelist we should work with,
                    // and create the filelist table to fit
                    IEnumerable <IParsedVolume> filelists =
                        from n in remotefiles
                        where n.FileType == RemoteVolumeType.Files
                        orderby n.Time descending
                        select n;

                    if (filelists.Count() <= 0)
                    {
                        throw new UserInformationException("No filelists found on the remote destination", "EmptyRemoteLocation");
                    }

                    if (filelistfilter != null)
                    {
                        filelists = filelistfilter(filelists).Select(x => x.Value).ToArray();
                    }

                    if (filelists.Count() <= 0)
                    {
                        throw new UserInformationException("No filelists", "NoMatchingRemoteFilelists");
                    }

                    // If we are updating, all files should be accounted for
                    foreach (var fl in remotefiles)
                    {
                        volumeIds[fl.File.Name] = updating ? restoredb.GetRemoteVolumeID(fl.File.Name) : restoredb.RegisterRemoteVolume(fl.File.Name, fl.FileType, fl.File.Size, RemoteVolumeState.Uploaded);
                    }

                    var hasUpdatedOptions = false;

                    if (updating)
                    {
                        Utility.UpdateOptionsFromDb(restoredb, m_options);
                        Utility.VerifyParameters(restoredb, m_options);
                    }

                    //Record all blocksets and files needed
                    using (var tr = restoredb.BeginTransaction())
                    {
                        var filelistWork = (from n in filelists orderby n.Time select new RemoteVolume(n.File) as IRemoteVolume).ToList();
                        Logging.Log.WriteInformationMessage(LOGTAG, "RebuildStarted", "Rebuild database started, downloading {0} filelists", filelistWork.Count);

                        var progress = 0;

                        // Register the files we are working with, if not already updated
                        if (updating)
                        {
                            foreach (var n in filelists)
                            {
                                if (volumeIds[n.File.Name] == -1)
                                {
                                    volumeIds[n.File.Name] = restoredb.RegisterRemoteVolume(n.File.Name, n.FileType, RemoteVolumeState.Uploaded, n.File.Size, new TimeSpan(0), tr);
                                }
                            }
                        }

                        var isFirstFilelist = true;
                        var blocksize       = m_options.Blocksize;
                        var hashes_pr_block = blocksize / m_options.BlockhashSize;

                        foreach (var entry in new AsyncDownloader(filelistWork, backend))
                        {
                            try
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(restoredb, null);
                                    m_result.EndTime = DateTime.UtcNow;
                                    return;
                                }

                                progress++;
                                if (filelistWork.Count == 1 && m_options.RepairOnlyPaths)
                                {
                                    m_result.OperationProgressUpdater.UpdateProgress(0.5f);
                                }
                                else
                                {
                                    m_result.OperationProgressUpdater.UpdateProgress(((float)progress / filelistWork.Count()) * (m_options.RepairOnlyPaths ? 1f : 0.2f));
                                }

                                using (var tmpfile = entry.TempFile)
                                {
                                    isFirstFilelist = false;

                                    if (entry.Hash != null && entry.Size > 0)
                                    {
                                        restoredb.UpdateRemoteVolume(entry.Name, RemoteVolumeState.Verified, entry.Size, entry.Hash, tr);
                                    }

                                    var parsed = VolumeBase.ParseFilename(entry.Name);

                                    if (!hasUpdatedOptions && (!updating || autoDetectBlockSize))
                                    {
                                        VolumeReaderBase.UpdateOptionsFromManifest(parsed.CompressionModule, tmpfile, m_options);
                                        hasUpdatedOptions = true;
                                        // Recompute the cached sizes
                                        blocksize       = m_options.Blocksize;
                                        hashes_pr_block = blocksize / m_options.BlockhashSize;
                                    }

                                    // Create timestamped operations based on the file timestamp
                                    var filesetid = restoredb.CreateFileset(volumeIds[entry.Name], parsed.Time, tr);
                                    using (var filelistreader = new FilesetVolumeReader(parsed.CompressionModule, tmpfile, m_options))
                                        foreach (var fe in filelistreader.Files.Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path)))
                                        {
                                            try
                                            {
                                                var expectedmetablocks          = (fe.Metasize + blocksize - 1) / blocksize;
                                                var expectedmetablocklisthashes = (expectedmetablocks + hashes_pr_block - 1) / hashes_pr_block;
                                                if (expectedmetablocks <= 1)
                                                {
                                                    expectedmetablocklisthashes = 0;
                                                }

                                                var metadataid = long.MinValue;
                                                switch (fe.Type)
                                                {
                                                case FilelistEntryType.Folder:
                                                    metadataid = restoredb.AddMetadataset(fe.Metahash, fe.Metasize, fe.MetaBlocklistHashes, expectedmetablocklisthashes, tr);
                                                    restoredb.AddDirectoryEntry(filesetid, fe.Path, fe.Time, metadataid, tr);
                                                    break;

                                                case FilelistEntryType.File:
                                                    var expectedblocks          = (fe.Size + blocksize - 1) / blocksize;
                                                    var expectedblocklisthashes = (expectedblocks + hashes_pr_block - 1) / hashes_pr_block;
                                                    if (expectedblocks <= 1)
                                                    {
                                                        expectedblocklisthashes = 0;
                                                    }

                                                    var blocksetid = restoredb.AddBlockset(fe.Hash, fe.Size, fe.BlocklistHashes, expectedblocklisthashes, tr);
                                                    metadataid = restoredb.AddMetadataset(fe.Metahash, fe.Metasize, fe.MetaBlocklistHashes, expectedmetablocklisthashes, tr);
                                                    restoredb.AddFileEntry(filesetid, fe.Path, fe.Time, blocksetid, metadataid, tr);

                                                    if (fe.Size <= blocksize)
                                                    {
                                                        if (!string.IsNullOrWhiteSpace(fe.Blockhash))
                                                        {
                                                            restoredb.AddSmallBlocksetLink(fe.Hash, fe.Blockhash, fe.Blocksize, tr);
                                                        }
                                                        else if (m_options.BlockHashAlgorithm == m_options.FileHashAlgorithm)
                                                        {
                                                            restoredb.AddSmallBlocksetLink(fe.Hash, fe.Hash, fe.Size, tr);
                                                        }
                                                        else
                                                        {
                                                            Logging.Log.WriteWarningMessage(LOGTAG, "MissingBlockHash", null, "No block hash found for file: {0}", fe.Path);
                                                        }
                                                    }

                                                    break;

                                                case FilelistEntryType.Symlink:
                                                    metadataid = restoredb.AddMetadataset(fe.Metahash, fe.Metasize, fe.MetaBlocklistHashes, expectedmetablocklisthashes, tr);
                                                    restoredb.AddSymlinkEntry(filesetid, fe.Path, fe.Time, metadataid, tr);
                                                    break;

                                                default:
                                                    Logging.Log.WriteWarningMessage(LOGTAG, "SkippingUnknownFileEntry", null, "Skipping file-entry with unknown type {0}: {1} ", fe.Type, fe.Path);
                                                    break;
                                                }

                                                if (fe.Metasize <= blocksize && (fe.Type == FilelistEntryType.Folder || fe.Type == FilelistEntryType.File || fe.Type == FilelistEntryType.Symlink))
                                                {
                                                    if (!string.IsNullOrWhiteSpace(fe.Metablockhash))
                                                    {
                                                        restoredb.AddSmallBlocksetLink(fe.Metahash, fe.Metablockhash, fe.Metasize, tr);
                                                    }
                                                    else if (m_options.BlockHashAlgorithm == m_options.FileHashAlgorithm)
                                                    {
                                                        restoredb.AddSmallBlocksetLink(fe.Metahash, fe.Metahash, fe.Metasize, tr);
                                                    }
                                                    else
                                                    {
                                                        Logging.Log.WriteWarningMessage(LOGTAG, "MissingMetadataBlockHash", null, "No block hash found for file metadata: {0}", fe.Path);
                                                    }
                                                }
                                            }
                                            catch (Exception ex)
                                            {
                                                Logging.Log.WriteWarningMessage(LOGTAG, "FileEntryProcessingFailed", ex, "Failed to process file-entry: {0}", fe.Path);
                                            }
                                        }
                                }
                            }
                            catch (Exception ex)
                            {
                                Logging.Log.WriteWarningMessage(LOGTAG, "FileProcessingFailed", ex, "Failed to process file: {0}", entry.Name);
                                if (ex is System.Threading.ThreadAbortException)
                                {
                                    m_result.EndTime = DateTime.UtcNow;
                                    throw;
                                }

                                if (isFirstFilelist && ex is System.Security.Cryptography.CryptographicException)
                                {
                                    m_result.EndTime = DateTime.UtcNow;
                                    throw;
                                }

                                if (m_options.UnittestMode)
                                {
                                    throw;
                                }
                            }
                        }

                        //Make sure we write the config
                        if (!updating)
                        {
                            Utility.VerifyParameters(restoredb, m_options, tr);
                        }

                        using (new Logging.Timer(LOGTAG, "CommitUpdateFilesetFromRemote", "CommitUpdateFilesetFromRemote"))
                            tr.Commit();
                    }

                    if (!m_options.RepairOnlyPaths)
                    {
                        var hashalg = Library.Utility.HashAlgorithmHelper.Create(m_options.BlockHashAlgorithm);
                        if (hashalg == null)
                        {
                            throw new UserInformationException(Strings.Common.InvalidHashAlgorithm(m_options.BlockHashAlgorithm), "BlockHashAlgorithmNotSupported");
                        }
                        var hashsize = hashalg.HashSize / 8;

                        //Grab all index files, and update the block table
                        using (var tr = restoredb.BeginTransaction())
                        {
                            var indexfiles = (
                                from n in remotefiles
                                where n.FileType == RemoteVolumeType.Index
                                select new RemoteVolume(n.File) as IRemoteVolume).ToList();

                            Logging.Log.WriteInformationMessage(LOGTAG, "FilelistsRestored", "Filelists restored, downloading {0} index files", indexfiles.Count);

                            var progress = 0;

                            foreach (var sf in new AsyncDownloader(indexfiles, backend))
                            {
                                try
                                {
                                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                    {
                                        backend.WaitForComplete(restoredb, null);
                                        m_result.EndTime = DateTime.UtcNow;
                                        return;
                                    }

                                    progress++;
                                    m_result.OperationProgressUpdater.UpdateProgress((((float)progress / indexfiles.Count) * 0.5f) + 0.2f);

                                    using (var tmpfile = sf.TempFile)
                                    {
                                        if (sf.Hash != null && sf.Size > 0)
                                        {
                                            restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Verified, sf.Size, sf.Hash, tr);
                                        }

                                        using (var svr = new IndexVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options, hashsize))
                                        {
                                            foreach (var a in svr.Volumes)
                                            {
                                                var filename = a.Filename;
                                                var volumeID = restoredb.GetRemoteVolumeID(filename);

                                                // No such file
                                                if (volumeID < 0)
                                                {
                                                    volumeID = ProbeForMatchingFilename(ref filename, restoredb);
                                                }

                                                var missing = false;
                                                // Still broken, register a missing item
                                                if (volumeID < 0)
                                                {
                                                    var p = VolumeBase.ParseFilename(filename);
                                                    if (p == null)
                                                    {
                                                        throw new Exception(string.Format("Unable to parse filename: {0}", filename));
                                                    }
                                                    Logging.Log.WriteErrorMessage(LOGTAG, "MissingFileDetected", null, "Remote file referenced as {0} by {1}, but not found in list, registering a missing remote file", filename, sf.Name);
                                                    missing  = true;
                                                    volumeID = restoredb.RegisterRemoteVolume(filename, p.FileType, RemoteVolumeState.Temporary, tr);
                                                }

                                                //Add all block/volume mappings
                                                foreach (var b in a.Blocks)
                                                {
                                                    restoredb.UpdateBlock(b.Key, b.Value, volumeID, tr);
                                                }

                                                restoredb.UpdateRemoteVolume(filename, missing ? RemoteVolumeState.Temporary : RemoteVolumeState.Verified, a.Length, a.Hash, tr);
                                                restoredb.AddIndexBlockLink(restoredb.GetRemoteVolumeID(sf.Name), volumeID, tr);
                                            }

                                            //If there are blocklists in the index file, update the blocklists
                                            foreach (var b in svr.BlockLists)
                                            {
                                                restoredb.UpdateBlockset(b.Hash, b.Blocklist, tr);
                                            }
                                        }
                                    }
                                }
                                catch (Exception ex)
                                {
                                    //Not fatal
                                    Logging.Log.WriteErrorMessage(LOGTAG, "IndexFileProcessingFailed", ex, "Failed to process index file: {0}", sf.Name);
                                    if (ex is System.Threading.ThreadAbortException)
                                    {
                                        m_result.EndTime = DateTime.UtcNow;
                                        throw;
                                    }

                                    if (m_options.UnittestMode)
                                    {
                                        throw;
                                    }
                                }
                            }

                            using (new Logging.Timer(LOGTAG, "CommitRecreateDb", "CommitRecreatedDb"))
                                tr.Commit();

                            // TODO: In some cases, we can avoid downloading all index files,
                            // if we are lucky and pick the right ones
                        }

                        // In some cases we have a stale reference from an index file to a deleted block file
                        if (!m_options.UnittestMode)
                        {
                            restoredb.CleanupMissingVolumes();
                        }

                        // We have now grabbed as much information as possible,
                        // if we are still missing data, we must now fetch block files
                        restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, null);

                        //We do this in three passes
                        for (var i = 0; i < 3; i++)
                        {
                            // Grab the list matching the pass type
                            var lst = restoredb.GetMissingBlockListVolumes(i, m_options.Blocksize, hashsize).ToList();
                            if (lst.Count > 0)
                            {
                                var fullist = ": " + string.Join(", ", lst.Select(x => x.Name));
                                switch (i)
                                {
                                case 0:
                                    Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingRequiredBlocklistVolumes", "Processing required {0} blocklist volumes{1}", lst.Count, fullist);
                                    Logging.Log.WriteInformationMessage(LOGTAG, "ProcessingRequiredBlocklistVolumes", "Processing required {0} blocklist volumes{1}", lst.Count, m_options.FullResult ? fullist : string.Empty);
                                    break;

                                case 1:
                                    Logging.Log.WriteVerboseMessage(LOGTAG, "ProbingCandicateBlocklistVolumes", "Probing {0} candidate blocklist volumes{1}", lst.Count, fullist);
                                    Logging.Log.WriteInformationMessage(LOGTAG, "ProbingCandicateBlocklistVolumes", "Probing {0} candidate blocklist volumes{1}", lst.Count, m_options.FullResult ? fullist : string.Empty);
                                    break;

                                default:
                                    Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingAllBlocklistVolumes", "Processing all of the {0} volumes for blocklists{1}", lst.Count, fullist);
                                    Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingAllBlocklistVolumes", "Processing all of the {0} volumes for blocklists{1}", lst.Count, m_options.FullResult ? fullist : string.Empty);
                                    break;
                                }
                            }

                            var progress = 0;
                            foreach (var sf in new AsyncDownloader(lst, backend))
                            {
                                try
                                {
                                    using (var tmpfile = sf.TempFile)
                                        using (var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options))
                                            using (var tr = restoredb.BeginTransaction())
                                            {
                                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                                {
                                                    backend.WaitForComplete(restoredb, null);
                                                    m_result.EndTime = DateTime.UtcNow;
                                                    return;
                                                }

                                                progress++;
                                                m_result.OperationProgressUpdater.UpdateProgress((((float)progress / lst.Count) * 0.1f) + 0.7f + (i * 0.1f));

                                                var volumeid = restoredb.GetRemoteVolumeID(sf.Name);

                                                restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Uploaded, sf.Size, sf.Hash, tr);

                                                // Update the block table so we know about the block/volume map
                                                foreach (var h in rd.Blocks)
                                                {
                                                    restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr);
                                                }

                                                // Grab all known blocklists from the volume
                                                foreach (var blocklisthash in restoredb.GetBlockLists(volumeid))
                                                {
                                                    restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr);
                                                }

                                                // Update tables so we know if we are done
                                                restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, tr);

                                                using (new Logging.Timer(LOGTAG, "CommitRestoredBlocklist", "CommitRestoredBlocklist"))
                                                    tr.Commit();

                                                //At this point we can patch files with data from the block volume
                                                if (blockprocessor != null)
                                                {
                                                    blockprocessor(sf.Name, rd);
                                                }
                                            }
                                }
                                catch (Exception ex)
                                {
                                    Logging.Log.WriteWarningMessage(LOGTAG, "FailedRebuildingWithFile", ex, "Failed to use information from {0} to rebuild database: {1}", sf.Name, ex.Message);
                                    if (m_options.UnittestMode)
                                    {
                                        throw;
                                    }
                                }
                            }
                        }
                    }

                    backend.WaitForComplete(restoredb, null);

                    // In some cases we have a stale reference from an index file to a deleted block file
                    if (!m_options.UnittestMode)
                    {
                        restoredb.CleanupMissingVolumes();
                    }

                    if (m_options.RepairOnlyPaths)
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "RecreateOrUpdateOnly", "Recreate/path-update completed, not running consistency checks");
                    }
                    else
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "RecreateCompletedCheckingDatabase", "Recreate completed, verifying the database consistency");

                        //All done, we must verify that we have all blocklist fully intact
                        // if this fails, the db will not be deleted, so it can be used,
                        // except to continue a backup
                        m_result.EndTime = DateTime.UtcNow;

                        using (var lbfdb = new LocalListBrokenFilesDatabase(restoredb))
                        {
                            var broken = lbfdb.GetBrokenFilesets(new DateTime(0), null, null).Count();
                            if (broken != 0)
                            {
                                throw new UserInformationException(string.Format("Recreated database has missing blocks and {0} broken filelists. Consider using \"{1}\" and \"{2}\" to purge broken data from the remote store and the database.", broken, "list-broken-files", "purge-broken-files"), "DatabaseIsBrokenConsiderPurge");
                            }
                        }

                        restoredb.VerifyConsistency(m_options.Blocksize, m_options.BlockhashSize, true, null);

                        Logging.Log.WriteInformationMessage(LOGTAG, "RecreateCompleted", "Recreate completed, and consistency checks completed, marking database as complete");

                        restoredb.RepairInProgress = false;
                    }

                    m_result.EndTime = DateTime.UtcNow;
                }
        }
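
For reference, the UpdateProgress calls in DoRun above split the progress bar into phases; this condensed sketch merely restates the fractions already used in the method:

        // Filelist pass: 0.0 -> 0.2 (a flat 0.5 when repairing paths from a single filelist).
        static float FilelistProgress(int done, int total) { return ((float)done / total) * 0.2f; }
        // Index-file pass: 0.2 -> 0.7.
        static float IndexProgress(int done, int total) { return ((float)done / total) * 0.5f + 0.2f; }
        // Block-volume passes i = 0..2: 0.7 + 0.1*i -> 0.8 + 0.1*i.
        static float BlockPassProgress(int done, int total, int pass) { return ((float)done / total) * 0.1f + 0.7f + pass * 0.1f; }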
Example #10
        /// <summary>
        /// Helper method that verifies uploaded volumes and updates their state in the database.
        /// Throws an error if there are issues with the remote storage
        /// </summary>
        /// <param name="backend">The backend instance to use</param>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        /// <param name="log">The log instance to use</param>
        public static void VerifyRemoteList(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
        {
            var  tp           = RemoteListAnalysis(backend, options, database, log);
            long extraCount   = 0;
            long missingCount = 0;

            foreach (var n in tp.ExtraVolumes)
            {
                log.AddWarning(string.Format("Extra unknown file: {0}", n.File.Name), null);
                extraCount++;
            }

            foreach (var n in tp.MissingVolumes)
            {
                log.AddWarning(string.Format("Missing file: {0}", n.Name), null);
                missingCount++;
            }

            if (extraCount > 0)
            {
                var s = string.Format("Found {0} remote files that are not recorded in local storage, please run repair", extraCount);
                log.AddError(s, null);
                throw new Exception(s);
            }

            var lookup  = new Dictionary <string, string>();
            var doubles = new Dictionary <string, string>();

            foreach (var v in tp.ParsedVolumes)
            {
                if (lookup.ContainsKey(v.File.Name))
                {
                    doubles[v.File.Name] = null;
                }
                else
                {
                    lookup[v.File.Name] = null;
                }
            }

            if (doubles.Count > 0)
            {
                var s = string.Format("Found remote files reported as duplicates, either the backend module is broken or you need to manually remove the extra copies.\nThe following files were found multiple times: {0}", string.Join(", ", doubles.Keys));
                log.AddError(s, null);
                throw new Exception(s);
            }

            if (missingCount > 0)
            {
                string s;
                if (!tp.BackupPrefixes.Contains(options.Prefix) && tp.BackupPrefixes.Length > 0)
                {
                    s = string.Format("Found {0} files that are missing from the remote storage, and no files with the backup prefix {1}, but found the following backup prefixes: {2}", missingCount, options.Prefix, string.Join(", ", tp.BackupPrefixes));
                }
                else
                {
                    s = string.Format("Found {0} files that are missing from the remote storage, please run repair", missingCount);
                }

                log.AddError(s, null);
                throw new Exception(s);
            }
        }
Example #11
 public bool FlushDbMessages(LocalDatabase database, System.Data.IDbTransaction transaction)
 {
     return m_db.FlushDbMessages(database, transaction);
 }
Example #12
        /// <summary>
        /// Helper method that compares the remote storage listing with the local database,
        /// updates volume states, and removes leftover files from interrupted operations.
        /// </summary>
        /// <param name="backend">The backend instance to use</param>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        /// <param name="log">The log instance to use</param>
        /// <returns>The result of the remote listing analysis</returns>
        public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
        {
            var rawlist = backend.List();
            var lookup = new Dictionary<string, Volumes.IParsedVolume>();

            var remotelist = (from n in rawlist
                              let p = Volumes.VolumeBase.ParseFilename(n)
                              where p != null
                              select p).ToList();
            var unknownlist = (from n in rawlist
                               let p = Volumes.VolumeBase.ParseFilename(n)
                               where p == null
                               select n).ToList();
            var filesets = (from n in remotelist
                            where n.FileType == RemoteVolumeType.Files
                            orderby n.Time descending
                            select n).ToList();
            
            log.KnownFileCount = remotelist.Count();
            log.KnownFileSize = remotelist.Select(x => x.File.Size).Sum();
            log.UnknownFileCount = unknownlist.Count();
            log.UnknownFileSize = unknownlist.Select(x => x.Size).Sum();
            log.BackupListCount = filesets.Count;
            log.LastBackupDate = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();
            
            if (backend is Library.Interface.IQuotaEnabledBackend)
            {
                log.TotalQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).TotalQuotaSpace;
                log.FreeQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).FreeQuotaSpace;
            }

            log.AssignedQuotaSpace = options.QuotaSize;
            
            foreach(var s in remotelist)
                if (s.Prefix == options.Prefix)
                    lookup[s.File.Name] = s;
                    
            var missing = new List<RemoteVolumeEntry>();
            var missingHash = new List<Tuple<long, RemoteVolumeEntry>>();
            var locallist = database.GetRemoteVolumes();
            foreach(var i in locallist)
            {
                Volumes.IParsedVolume r;
                var remoteFound = lookup.TryGetValue(i.Name, out r);
                var correctSize = remoteFound && i.Size >= 0 && (i.Size == r.File.Size || r.File.Size < 0);

                lookup.Remove(i.Name);

                switch (i.State)
                {
                    case RemoteVolumeState.Deleted:
                        if (remoteFound)
                            log.AddMessage(string.Format("ignoring remote file listed as {0}: {1}", i.State, i.Name));

                        break;

                    case RemoteVolumeState.Temporary:
                    case RemoteVolumeState.Deleting:
                        if (remoteFound)
                        {
                            log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                            backend.Delete(i.Name, i.Size, true);
                        }
                        else
                        {
                            log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
                            database.RemoveRemoteVolume(i.Name, null);
                        }
                        break;
                    case RemoteVolumeState.Uploading:
                        if (remoteFound && correctSize && r.File.Size >= 0)
                        {
                            log.AddMessage(string.Format("promoting uploaded complete file from {0} to {2}: {1}", i.State, i.Name, RemoteVolumeState.Uploaded));
                            database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Uploaded, i.Size, i.Hash);
                        }
                        else
                        {
                            log.AddMessage(string.Format("removing incomplete remote file listed as {0}: {1}", i.State, i.Name));
                            backend.Delete(i.Name, i.Size, true);
                        }
                        break;

                    case RemoteVolumeState.Uploaded:
                        if (!remoteFound)
                            missing.Add(i);
                        else if (correctSize)
                            database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash);
                        else
                            missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));

                        break;

                    case RemoteVolumeState.Verified:
                        if (!remoteFound)
                            missing.Add(i);
                        else if (!correctSize)
                            missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));

                        break;
                
                    default:
                        log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null);
                        break;
                }

            }

            foreach(var i in missingHash)
                log.AddWarning(string.Format("remote file {1} is listed as {0} with size {2} but should be {3}, please verify the sha256 hash \"{4}\"", i.Item2.State, i.Item2.Name, i.Item1, i.Item2.Size, i.Item2.Hash), null);
            
            return new RemoteAnalysisResult() { ParsedVolumes = remotelist, ExtraVolumes = lookup.Values, MissingVolumes = missing, VerificationRequiredVolumes = missingHash.Select(x => x.Item2) };
        }
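
A hypothetical consumer of the analysis result above, mirroring how VerifyRemoteList uses it elsewhere in this section; the containing class name is an assumption:

            var result = FilelistProcessor.RemoteListAnalysis(backend, options, database, log);
            foreach (var extra in result.ExtraVolumes)
                log.AddWarning(string.Format("Extra unknown file: {0}", extra.File.Name), null);
            foreach (var vol in result.VerificationRequiredVolumes)
                // entries whose recorded size disagrees with the remote listing
                log.AddMessage(string.Format("file needs hash verification: {0}", vol.Name));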
Example #13
 private ConnectionManager(ISqliteConfig sqliteConfig)
 {
     var conn = new SQLiteConnection(sqliteConfig.Path);
     database = new LocalDatabase(conn);
 }
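
The private constructor above implies an accessor elsewhere; a typical companion, assumed rather than taken from the source:

 private static ConnectionManager instance;

 // Assumed singleton accessor (not in the excerpt): lazily create one manager and reuse it.
 public static ConnectionManager GetInstance(ISqliteConfig sqliteConfig)
 {
     if (instance == null)
         instance = new ConnectionManager(sqliteConfig);
     return instance;
 }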
Example #14
 internal CoreStateMachines(ReplicatedTransactionStateMachine replicatedTxStateMachine, ReplicatedTokenStateMachine labelTokenStateMachine, ReplicatedTokenStateMachine relationshipTypeTokenStateMachine, ReplicatedTokenStateMachine propertyKeyTokenStateMachine, ReplicatedLockTokenStateMachine replicatedLockTokenStateMachine, ReplicatedIdAllocationStateMachine idAllocationStateMachine, DummyMachine benchmarkMachine, LocalDatabase localDatabase, RecoverConsensusLogIndex consensusLogIndexRecovery)
 {
     if (!InstanceFieldsInitialized)
     {
         InitializeInstanceFields();
         InstanceFieldsInitialized = true;
     }
     this._replicatedTxStateMachine          = replicatedTxStateMachine;
     this._labelTokenStateMachine            = labelTokenStateMachine;
     this._relationshipTypeTokenStateMachine = relationshipTypeTokenStateMachine;
     this._propertyKeyTokenStateMachine      = propertyKeyTokenStateMachine;
     this._replicatedLockTokenStateMachine   = replicatedLockTokenStateMachine;
     this._idAllocationStateMachine          = idAllocationStateMachine;
      this._benchmarkMachine                  = benchmarkMachine;
      this._localDatabase                     = localDatabase;
      this._consensusLogIndexRecovery         = consensusLogIndexRecovery;
 }
Example #15
 // When this page appears, it gets all the geo data from the database and displays it in the list
 protected override void OnAppearing()
 {
     base.OnAppearing();
     // Bind the ListView's ItemsSource to the geo data list from the local SQLite database
     listView.ItemsSource = LocalDatabase.GetAllGeoDataSet();
 }
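
For reference, here is a minimal sketch of what LocalDatabase.GetAllGeoDataSet() could look like with the common sqlite-net pattern; the GeoData type and DatabasePath constant are assumptions, not part of the example above:

    // Hypothetical sketch only; the real GetAllGeoDataSet() is not shown here.
    public static List<GeoData> GetAllGeoDataSet()
    {
        // sqlite-net style: open the connection and materialize the mapped table
        using (var conn = new SQLiteConnection(DatabasePath))
            return conn.Table<GeoData>().ToList();
    }
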
        /// <summary>
        /// Run the recreate procedure
        /// </summary>
        /// <param name="dbparent">The database to restore into</param>
        /// <param name="updating">True if this is an update call, false otherwise</param>
        /// <param name="filter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param>
        /// <param name="filenamefilter">Filters the files in a filelist to prevent downloading unwanted data</param>
        /// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be use to recover data blocks while processing blocklists</param>
        internal void DoRun(LocalDatabase dbparent, bool updating, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Recreate_Running);

            //We build a local database in steps.
            using (var restoredb = new LocalRecreateDatabase(dbparent, m_options))
                using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, restoredb))
                {
                    var volumeIds = new Dictionary<string, long>();

                    var rawlist = backend.List();

                    //First step is to examine the remote storage to see what
                    // kind of data we can find
                    var remotefiles =
                        (from x in rawlist
                         let n = VolumeBase.ParseFilename(x)
                         where n != null && n.Prefix == m_options.Prefix
                         select n).ToArray(); //ToArray() ensures that we do not remote-request it multiple times

                    if (remotefiles.Length == 0)
                    {
                        if (rawlist.Count == 0)
                        {
                            throw new Exception("No files were found at the remote location, perhaps the target url is incorrect?");
                        }
                        else
                        {
                            var tmp =
                                (from x in rawlist
                                 let n = VolumeBase.ParseFilename(x)
                                 where n != null
                                 select n.Prefix).ToArray();

                            var types = tmp.Distinct().ToArray();
                            if (tmp.Length == 0)
                            {
                                throw new Exception(string.Format("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count));
                            }
                            else if (types.Length == 1)
                            {
                                throw new Exception(string.Format("Found {0} parse-able files with the prefix {1}, did you forget to set the backup-prefix?", tmp.Length, types[0]));
                            }
                            else
                            {
                                throw new Exception(string.Format("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup-prefix?", tmp.Length, rawlist.Count, string.Join(", ", types)));
                            }
                        }
                    }

                    //Then we select the filelist we should work with,
                    // and create the filelist table to fit
                    IEnumerable<IParsedVolume> filelists =
                        from n in remotefiles
                        where n.FileType == RemoteVolumeType.Files
                        orderby n.Time descending
                        select n;

                    if (filelists.Count() <= 0)
                    {
                        throw new Exception(string.Format("No filelists found on the remote destination"));
                    }

                    if (filelistfilter != null)
                    {
                        filelists = filelistfilter(filelists).Select(x => x.Value).ToArray();
                    }

                    if (filelists.Count() <= 0)
                    {
                        throw new Exception(string.Format("No filelists"));
                    }

                    // If we are updating, all files should be accounted for
                    foreach (var fl in remotefiles)
                    {
                        volumeIds[fl.File.Name] = updating ? restoredb.GetRemoteVolumeID(fl.File.Name) : restoredb.RegisterRemoteVolume(fl.File.Name, fl.FileType, fl.File.Size, RemoteVolumeState.Uploaded);
                    }

                    var hasUpdatedOptions = false;

                    if (updating)
                    {
                        Utility.UpdateOptionsFromDb(restoredb, m_options);
                        Utility.VerifyParameters(restoredb, m_options);
                    }

                    //Record all blocksets and files needed
                    using (var tr = restoredb.BeginTransaction())
                    {
                        var filelistWork = (from n in filelists orderby n.Time select new RemoteVolume(n.File) as IRemoteVolume).ToList();
                        var progress     = 0;

                        // Register the files we are working with, if not already updated
                        if (updating)
                        {
                            foreach (var n in filelists)
                            {
                                if (volumeIds[n.File.Name] == -1)
                                {
                                    volumeIds[n.File.Name] = restoredb.RegisterRemoteVolume(n.File.Name, n.FileType, RemoteVolumeState.Uploaded, n.File.Size, new TimeSpan(0), tr);
                                }
                            }
                        }


                        foreach (var entry in new AsyncDownloader(filelistWork, backend))
                        {
                            try
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(restoredb, null);
                                    return;
                                }

                                progress++;
                                if (filelistWork.Count == 1 && m_options.RepairOnlyPaths)
                                {
                                    m_result.OperationProgressUpdater.UpdateProgress(0.5f);
                                }
                                else
                                {
                                    m_result.OperationProgressUpdater.UpdateProgress(((float)progress / filelistWork.Count()) * (m_options.RepairOnlyPaths ? 1f : 0.2f));
                                }

                                using (var tmpfile = entry.TempFile)
                                {
                                    if (entry.Hash != null && entry.Size > 0)
                                    {
                                        restoredb.UpdateRemoteVolume(entry.Name, RemoteVolumeState.Verified, entry.Size, entry.Hash, tr);
                                    }

                                    var parsed = VolumeBase.ParseFilename(entry.Name);

                                    if (!hasUpdatedOptions && !updating)
                                    {
                                        VolumeReaderBase.UpdateOptionsFromManifest(parsed.CompressionModule, tmpfile, m_options);
                                        hasUpdatedOptions = true;
                                    }

                                    // Create timestamped operations based on the file timestamp
                                    var filesetid = restoredb.CreateFileset(volumeIds[entry.Name], parsed.Time, tr);
                                    using (var filelistreader = new FilesetVolumeReader(parsed.CompressionModule, tmpfile, m_options))
                                        foreach (var fe in filelistreader.Files.Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path)))
                                        {
                                            try
                                            {
                                                if (fe.Type == FilelistEntryType.Folder)
                                                {
                                                    restoredb.AddDirectoryEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                                }
                                                else if (fe.Type == FilelistEntryType.File)
                                                {
                                                    var blocksetid = restoredb.AddBlockset(fe.Hash, fe.Size, fe.BlocklistHashes, tr);
                                                    restoredb.AddFileEntry(filesetid, fe.Path, fe.Time, blocksetid, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                                }
                                                else if (fe.Type == FilelistEntryType.Symlink)
                                                {
                                                    restoredb.AddSymlinkEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                                }
                                                else
                                                {
                                                    m_result.AddWarning(string.Format("Skipping file-entry with unknown type {0}: {1} ", fe.Type, fe.Path), null);
                                                }
                                            }
                                            catch (Exception ex)
                                            {
                                                m_result.AddWarning(string.Format("Failed to process file-entry: {0}", fe.Path), ex);
                                            }
                                        }
                                }
                            }
                            catch (Exception ex)
                            {
                                m_result.AddWarning(string.Format("Failed to process file: {0}", entry.Name), ex);
                                if (ex is System.Threading.ThreadAbortException)
                                {
                                    throw;
                                }
                            }
                        }

                        //Make sure we write the config
                        if (!updating)
                        {
                            Utility.VerifyParameters(restoredb, m_options, tr);
                        }

                        using (new Logging.Timer("CommitUpdateFilesetFromRemote"))
                            tr.Commit();
                    }

                    if (!m_options.RepairOnlyPaths)
                    {
                        var hashalg = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
                        if (hashalg == null)
                        {
                            throw new Exception(Strings.Foresthash.InvalidHashAlgorithm(m_options.BlockHashAlgorithm));
                        }
                        var hashsize = hashalg.HashSize / 8;

                        //Grab all index files, and update the block table
                        using (var tr = restoredb.BeginTransaction())
                        {
                            var indexfiles = (
                                from n in remotefiles
                                where n.FileType == RemoteVolumeType.Index
                                select new RemoteVolume(n.File) as IRemoteVolume).ToList();

                            var progress = 0;

                            foreach (var sf in new AsyncDownloader(indexfiles, backend))
                            {
                                try
                                {
                                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                    {
                                        backend.WaitForComplete(restoredb, null);
                                        return;
                                    }

                                    progress++;
                                    m_result.OperationProgressUpdater.UpdateProgress((((float)progress / indexfiles.Count) * 0.5f) + 0.2f);

                                    using (var tmpfile = sf.TempFile)
                                    {
                                        if (sf.Hash != null && sf.Size > 0)
                                        {
                                            restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Verified, sf.Size, sf.Hash, tr);
                                        }

                                        using (var svr = new IndexVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options, hashsize))
                                        {
                                            foreach (var a in svr.Volumes)
                                            {
                                                var filename = a.Filename;
                                                var volumeID = restoredb.GetRemoteVolumeID(filename);

                                                // No such file
                                                if (volumeID < 0)
                                                {
                                                    volumeID = ProbeForMatchingFilename(ref filename, restoredb);
                                                }

                                                // Still broken, register a missing item
                                                if (volumeID < 0)
                                                {
                                                    var p = VolumeBase.ParseFilename(filename);
                                                    if (p == null)
                                                    {
                                                        throw new Exception(string.Format("Unable to parse filename: {0}", filename));
                                                    }
                                                    m_result.AddError(string.Format("Remote file referenced as {0}, but not found in list, registering a missing remote file", filename), null);
                                                    volumeID = restoredb.RegisterRemoteVolume(filename, p.FileType, RemoteVolumeState.Verified, tr);
                                                }

                                                //Add all block/volume mappings
                                                foreach (var b in a.Blocks)
                                                {
                                                    restoredb.UpdateBlock(b.Key, b.Value, volumeID, tr);
                                                }

                                                restoredb.UpdateRemoteVolume(filename, RemoteVolumeState.Verified, a.Length, a.Hash, tr);
                                                restoredb.AddIndexBlockLink(restoredb.GetRemoteVolumeID(sf.Name), volumeID, tr);
                                            }

                                            //If there are blocklists in the index file, update the blocklists
                                            foreach (var b in svr.BlockLists)
                                            {
                                                restoredb.UpdateBlockset(b.Hash, b.Blocklist, tr);
                                            }
                                        }
                                    }
                                }
                                catch (Exception ex)
                                {
                                    //Not fatal
                                    m_result.AddWarning(string.Format("Failed to process index file: {0}", sf.Name), ex);
                                    if (ex is System.Threading.ThreadAbortException)
                                    {
                                        throw;
                                    }
                                }
                            }

                            using (new Logging.Timer("CommitRecreatedDb"))
                                tr.Commit();

                            // TODO: In some cases, we can avoid downloading all index files,
                            // if we are lucky and pick the right ones
                        }

                        // We have now grabbed as much information as possible,
                        // if we are still missing data, we must now fetch block files
                        restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, null);

                        //We do this in three passes
                        for (var i = 0; i < 3; i++)
                        {
                            // Grab the list matching the pass type
                            var lst = restoredb.GetMissingBlockListVolumes(i).ToList();
                            if (lst.Count > 0)
                            {
                                switch (i)
                                {
                                case 0:
                                    if (m_options.Verbose)
                                    {
                                        m_result.AddVerboseMessage("Processing required {0} blocklist volumes: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                    }
                                    else
                                    {
                                        m_result.AddMessage(string.Format("Processing required {0} blocklist volumes", lst.Count));
                                    }
                                    break;

                                case 1:
                                    if (m_options.Verbose)
                                    {
                                        m_result.AddVerboseMessage("Probing {0} candidate blocklist volumes: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                    }
                                    else
                                    {
                                        m_result.AddMessage(string.Format("Probing {0} candidate blocklist volumes", lst.Count));
                                    }
                                    break;

                                default:
                                    if (m_options.Verbose)
                                    {
                                        m_result.AddVerboseMessage("Processing all of the {0} volumes for blocklists: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                    }
                                    else
                                    {
                                        m_result.AddMessage(string.Format("Processing all of the {0} volumes for blocklists", lst.Count));
                                    }
                                    break;
                                }
                            }

                            var progress = 0;
                            foreach (var sf in new AsyncDownloader(lst, backend))
                            {
                                using (var tmpfile = sf.TempFile)
                                    using (var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options))
                                        using (var tr = restoredb.BeginTransaction())
                                        {
                                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                            {
                                                backend.WaitForComplete(restoredb, null);
                                                return;
                                            }

                                            progress++;
                                            m_result.OperationProgressUpdater.UpdateProgress((((float)progress / lst.Count) * 0.1f) + 0.7f + (i * 0.1f));

                                            var volumeid = restoredb.GetRemoteVolumeID(sf.Name);

                                            restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Uploaded, sf.Size, sf.Hash, tr);

                                            // Update the block table so we know about the block/volume map
                                            foreach (var h in rd.Blocks)
                                            {
                                                restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr);
                                            }

                                            // Grab all known blocklists from the volume
                                            foreach (var blocklisthash in restoredb.GetBlockLists(volumeid))
                                            {
                                                restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr);
                                            }

                                            // Update tables so we know if we are done
                                            restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, tr);

                                            using (new Logging.Timer("CommitRestoredBlocklist"))
                                                tr.Commit();

                                            //At this point we can patch files with data from the block volume
                                            if (blockprocessor != null)
                                            {
                                                blockprocessor(sf.Name, rd);
                                            }
                                        }
                            }
                        }
                    }

                    backend.WaitForComplete(restoredb, null);

                    //All done, we must verify that we have all blocklist fully intact
                    // if this fails, the db will not be deleted, so it can be used,
                    // except to continue a backup
                    restoredb.VerifyConsistency(null, m_options.Blocksize, m_options.BlockhashSize);
                }
        }
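
The progress reporting in this run maps each phase onto a fixed slice of the 0-1 range: filelist downloads cover roughly 0.0-0.2, index files 0.2-0.7, and the three blocklist passes 0.1 each from 0.7 upward. A small illustrative helper (not from the code above) capturing that arithmetic:

    // Illustrative: map per-phase progress onto the global 0..1 scale.
    static float PhaseProgress(float phaseStart, float phaseWidth, int done, int total)
    {
        if (total <= 0)
            return phaseStart;
        return phaseStart + ((float)done / total) * phaseWidth;
    }

    // Example: the 3rd of 10 index files gives PhaseProgress(0.2f, 0.5f, 3, 10) == 0.35f,
    // matching ((float)progress / indexfiles.Count) * 0.5f + 0.2f above.
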
Example #17
 private void PruneFailedDownload(string docId)
 {
     LocalDatabase.DeleteLocalDocument(docId);
 }
Example #18
 private static bool MigrateFrom_1_0(LocalDatabase localDatabase, DatabaseMetadata databaseMetadata)
 {
     return AddFilesTable(localDatabase) && RecreateFictionTables(localDatabase) && AddFileIdColumns(localDatabase) &&
            UpdateMetadata(localDatabase, databaseMetadata);
 }
Example #19
 private static bool MigrateFrom_1_2_1(LocalDatabase localDatabase, DatabaseMetadata databaseMetadata)
 {
     return RecreateFictionTables(localDatabase) && UpdateMetadata(localDatabase, databaseMetadata);
 }
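
These migration helpers rely on && short-circuiting: each step returns false on failure, so the chain stops at the first failed step. A hypothetical dispatcher tying such steps to a stored version (the Version property and version strings here are assumptions):

    // Hypothetical sketch: choose the migration chain from the stored version.
    private static bool Migrate(LocalDatabase localDatabase, DatabaseMetadata databaseMetadata)
    {
        switch (databaseMetadata.Version)
        {
            case "1.0":   return MigrateFrom_1_0(localDatabase, databaseMetadata);
            case "1.2.1": return MigrateFrom_1_2_1(localDatabase, databaseMetadata);
            default:      return true; // already up to date
        }
    }
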
        internal override void BeginReplicating()
        {
            Log.D(TAG, "beginReplicating() called");

            // If we're still waiting to create the remote db, do nothing now. (This method will be
            // re-invoked after that request finishes; see maybeCreateRemoteDB() above.)
            if (_creatingTarget)
            {
                Log.D(TAG, "creatingTarget == true, doing nothing");
                return;
            }

            _pendingSequences = new SortedDictionary<long, int>();
            try {
                _maxPendingSequence = Int64.Parse(LastSequence);
            } catch (Exception e) {
                Log.W(TAG, "Error converting lastSequence: " + LastSequence + " to long. Using 0", e);
                _maxPendingSequence = 0;
            }

            if (Filter != null)
            {
                _filter = LocalDatabase.GetFilter(Filter);
            }
            else
            {
                // If no filter function was provided, but DocIds were
                // specified, then only push the documents listed in the
                // DocIds property. It is assumed that if the users
                // specified both a filter name and doc ids that their
                // custom filter function will handle that. This is
                // consistent with the iOS behavior.
                if (DocIds != null && DocIds.Any())
                {
                    _filter = (rev, filterParams) => DocIds.Contains(rev.Document.Id);
                }
            }

            if (Filter != null && _filter == null)
            {
                Log.W(TAG, string.Format("{0}: No ReplicationFilter registered for filter '{1}'; ignoring", this, Filter));
            }

            // Process existing changes since the last push:
            long lastSequenceLong = 0;

            if (LastSequence != null)
            {
                lastSequenceLong = long.Parse(LastSequence);
            }

            // Now listen for future changes (in continuous mode):
            // Note: the observer must be registered before the call to
            // ChangesSince, or else there is a race condition: a document
            // added between the ChangesSince call and registering the
            // observer would be skipped.
            if (Continuous)
            {
                _observing             = true;
                LocalDatabase.Changed += OnChanged;
            }

            var options = new ChangesOptions();

            options.IncludeConflicts = true;
            var changes = LocalDatabase.ChangesSince(lastSequenceLong, options, _filter, FilterParams);

            if (changes.Count > 0)
            {
                Batcher.QueueObjects(changes);
                Batcher.Flush();
            }

            if (Continuous)
            {
                if (changes.Count == 0)
                {
                    FireTrigger(ReplicationTrigger.WaitingForChanges);
                }
            }
            else
            {
                if (changes.Count == 0)
                {
                    FireTrigger(ReplicationTrigger.StopGraceful);
                }
            }
        }
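
The ordering above matters: the Changed handler is attached before ChangesSince reads the backlog, so a change landing in between is observed (possibly twice) rather than skipped. One way to make the overlap harmless is to deduplicate by sequence number, as in this self-contained sketch (all names illustrative, not the Couchbase Lite API):

    // Subscribe first, then backfill; duplicates are filtered by sequence number.
    class ChangeFeed
    {
        public event Action<long> Changed;
        private readonly List<long> _log = new List<long>();
        public IEnumerable<long> ChangesSince(long seq) { return _log.Where(s => s > seq); }
        public void Add(long seq) { _log.Add(seq); var h = Changed; if (h != null) h(seq); }
    }

    static void StartReplicating(ChangeFeed feed, long checkpoint, Action<long> enqueue)
    {
        var seen = new HashSet<long>();
        feed.Changed += seq => { if (seen.Add(seq)) enqueue(seq); }; // future changes
        foreach (var seq in feed.ChangesSince(checkpoint))           // then the backlog
            if (seen.Add(seq))
                enqueue(seq);
    }
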
Example #21
        internal static void VerifyParameters(LocalDatabase db, Options options, System.Data.IDbTransaction transaction = null)
        {
            var newDict = new Dictionary<string, string>();

            newDict.Add("blocksize", options.Blocksize.ToString());
            newDict.Add("blockhash", options.BlockHashAlgorithm);
            newDict.Add("filehash", options.FileHashAlgorithm);
            var opts = db.GetDbOptions(transaction);

            if (options.NoEncryption)
            {
                newDict.Add("passphrase", "no-encryption");
            }
            else
            {
                string salt;
                opts.TryGetValue("passphrase-salt", out salt);
                if (string.IsNullOrEmpty(salt))
                {
                    // Note: the salt is not generated with a crypto-class PRNG
                    var buf = new byte[32];
                    new Random().NextBytes(buf);
                    //Add version so we can detect and change the algorithm
                    salt = "v1:" + Library.Utility.Utility.ByteArrayAsHexString(buf);
                }

                newDict["passphrase-salt"] = salt;

                // We avoid storing the passphrase directly,
                // instead we salt and rehash repeatedly
                newDict.Add("passphrase", Library.Utility.Utility.ByteArrayAsHexString(Library.Utility.Utility.RepeatedHashWithSalt(options.Passphrase, salt, 1200)));
            }


            var needsUpdate = false;

            foreach (var k in newDict)
            {
                if (!opts.ContainsKey(k.Key))
                {
                    needsUpdate = true;
                }
                else if (opts[k.Key] != k.Value)
                {
                    if (k.Key == "passphrase")
                    {
                        if (!options.AllowPassphraseChange)
                        {
                            if (newDict[k.Key] == "no-encryption")
                            {
                                throw new Exception("Unsupported removal of passphrase");
                            }
                            else if (opts[k.Key] == "no-encryption")
                            {
                                throw new Exception("Unsupported addition of passphrase");
                            }
                            else
                            {
                                throw new Exception("Unsupported change of passphrase");
                            }
                        }
                    }
                    else
                    {
                        throw new Exception(string.Format("Unsupported change of parameter \"{0}\" from \"{1}\" to \"{2}\"", k.Key, opts[k.Key], k.Value));
                    }
                }
            }

            //Extra sanity check
            if (db.GetBlocksLargerThan(options.Blocksize) > 0)
            {
                throw new Exception("Unsupported block-size change detected");
            }

            if (needsUpdate)
            {
                // Make sure we do not lose values
                foreach (var k in opts)
                {
                    if (!newDict.ContainsKey(k.Key))
                    {
                        newDict[k.Key] = k.Value;
                    }
                }

                db.SetDbOptions(newDict, transaction);
            }
        }
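
The passphrase fingerprint above comes from Library.Utility.Utility.RepeatedHashWithSalt, whose implementation is not shown on this page. A plausible sketch of the idea, repeatedly hashing the salt plus the previous digest; the real routine may differ in detail:

    // Sketch only: iterate SHA256(salt || digest) the requested number of times.
    static byte[] RepeatedHashWithSalt(string data, string salt, int repeats)
    {
        using (var sha = System.Security.Cryptography.SHA256.Create())
        {
            var saltBytes = System.Text.Encoding.UTF8.GetBytes(salt);
            var digest = System.Text.Encoding.UTF8.GetBytes(data);
            for (var i = 0; i < repeats; i++)
            {
                sha.TransformBlock(saltBytes, 0, saltBytes.Length, saltBytes, 0);
                sha.TransformFinalBlock(digest, 0, digest.Length);
                digest = sha.Hash;
                sha.Initialize(); // reset for the next round
            }
            return digest;
        }
    }
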
Example #22
        private void submissionStatus_FormatCell(object sender, BrightIdeasSoftware.FormatCellEventArgs e)
        {
            string    font  = "Segoe UI";
            float     size  = 9.0F;
            FontStyle style = FontStyle.Regular;
            Color     fore  = Color.Black;

            //highlight item
            JudgeStatus js = (JudgeStatus)e.Model;

            if (js.uname == RegistryAccess.DefaultUsername)
            {
                for (int i = 0; i < e.Item.SubItems.Count; ++i)
                {
                    e.Item.SubItems[i].BackColor = Color.Turquoise;
                }
            }
            else if (LocalDatabase.ContainsUser(js.uname))
            {
                for (int i = 0; i < e.Item.SubItems.Count; ++i)
                {
                    e.Item.SubItems[i].BackColor = Color.LightBlue;
                }
            }

            //highlight other
            if (e.Column == sidSUB)
            {
                font = "Consolas";
                fore = Color.Teal;
                size = 8.5F;
            }
            else if (e.Column == unameSUB)
            {
                fore  = Color.Navy;
                style = FontStyle.Italic;
            }
            else if (e.Column == fullnameSUB)
            {
                font = "Segoe UI Semibold";
            }
            else if (e.Column == pnumSUB)
            {
                font  = "Consolas";
                fore  = Color.Navy;
                style = FontStyle.Italic;
            }
            else if (e.Column == ptitleSUB)
            {
                font = "Segoe UI Semibold";
                fore = Functions.GetProblemTitleColor(js.pnum);
            }
            else if (e.Column == runSUB)
            {
                fore = Color.SlateBlue;
            }
            else if (e.Column == memSUB)
            {
                font = "Consolas";
                fore = Color.Blue;
                size = 8.5F;
            }
            else if (e.Column == subtimeSUB)
            {
                fore = Color.Maroon;
            }
            else if (e.Column == rankSUB)
            {
                fore = Color.Navy;
                font = "Segoe UI Semibold";
            }
            else if (e.Column == verSUB)
            {
                font  = "Segoe UI";
                fore  = Functions.GetVerdictColor(js.ver);
                style = FontStyle.Bold;
            }
            else if (e.Column == lanSUB)
            {
                style = FontStyle.Bold;
                fore  = Color.Navy;
            }
            else
            {
                return;
            }

            e.SubItem.ForeColor = fore;
            e.SubItem.Font      = new Font(font, size, style);
        }
Example #23
            public bool FlushDbMessages(LocalDatabase db, System.Data.IDbTransaction transaction)
            {
                List<IDbEntry> entries;
                lock(m_dbqueuelock)
                    if (m_dbqueue.Count == 0)
                        return false;
                    else
                    {
                        entries = m_dbqueue;
                        m_dbqueue = new List<IDbEntry>();
                    }

                //As we replace the list, we can now freely access the elements without locking
                foreach(var e in entries)
                    if (e is DbOperation)
                        db.LogRemoteOperation(((DbOperation)e).Action, ((DbOperation)e).File, ((DbOperation)e).Result, transaction);
                    else if (e is DbUpdate)
                        db.UpdateRemoteVolume(((DbUpdate)e).Remotename, ((DbUpdate)e).State, ((DbUpdate)e).Size, ((DbUpdate)e).Hash, transaction);
                    else if (e is DbRename)
                        db.RenameRemoteFile(((DbRename)e).Oldname, ((DbRename)e).Newname, transaction);
                    else if (e != null)
                        m_stats.AddError(string.Format("Queue had element of type: {0}, {1}", e.GetType(), e.ToString()), null);

                return true;
            }
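
FlushDbMessages uses a swap-under-lock pattern: the filled queue is exchanged for a fresh list while holding the lock, so the potentially slow database writes run without blocking producers. The same pattern in isolation (generic names, not from the code above):

    // Generic drain: after the swap, 'entries' is owned exclusively by this thread.
    private readonly object m_lock = new object();
    private List<string> m_queue = new List<string>();

    public bool Drain(Action<string> process)
    {
        List<string> entries;
        lock (m_lock)
        {
            if (m_queue.Count == 0)
                return false;
            entries = m_queue;            // take ownership of the filled list
            m_queue = new List<string>(); // producers continue on a fresh list
        }
        foreach (var e in entries)        // no lock held during processing
            process(e);
        return true;
    }
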
        /// <summary>
        /// Run the recreate procedure
        /// </summary>
        /// <param name="path">Path to the database that will be created</param>
        /// <param name="filelistfilter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param>
        /// <param name="filenamefilter">Filters the files in a filelist to prevent downloading unwanted data</param>
        /// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be use to recover data blocks while processing blocklists</param>
        internal void DoRun(LocalDatabase dbparent, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null)
        {
            var hashalg = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
            if (hashalg == null)
                throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.BlockHashAlgorithm));
            var hashsize = hashalg.HashSize / 8;

            //We build a local database in steps.
            using(var restoredb = new LocalRecreateDatabase(dbparent, m_options))
            using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, restoredb))
            {
                var volumeIds = new Dictionary<string, long>();

                var rawlist = backend.List();

                //First step is to examine the remote storage to see what
                // kind of data we can find
                var remotefiles =
                    (from x in rawlist
                     let n = VolumeBase.ParseFilename(x)
                     where n != null && n.Prefix == m_options.Prefix
                     select n).ToArray(); //ToArray() ensures that we do not remote-request it multiple times

                if (remotefiles.Length == 0)
                {
                    if (rawlist.Count == 0)
                        throw new Exception("No files were found at the remote location, perhaps the target url is incorrect?");
                    else
                    {
                        var tmp =
                            (from x in rawlist
                             let n = VolumeBase.ParseFilename(x)
                             where n != null
                             select n.Prefix).ToArray();

                        var types = tmp.Distinct().ToArray();
                        if (tmp.Length == 0)
                            throw new Exception(string.Format("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count));
                        else if (types.Length == 1)
                            throw new Exception(string.Format("Found {0} parse-able files with the prefix {1}, did you forget to set the backup-prefix?", tmp.Length, types[0]));
                        else
                            throw new Exception(string.Format("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup-prefix?", tmp.Length, rawlist.Count, string.Join(", ", types)));
                    }
                }

                //Then we select the filelist we should work with,
                // and create the filelist table to fit
                IEnumerable<IParsedVolume> filelists =
                    from n in remotefiles
                    where n.FileType == RemoteVolumeType.Files
                    orderby n.Time descending
                    select n;

                if (filelistfilter != null)
                    filelists = filelistfilter(filelists).Select(x => x.Value).ToArray();

                foreach(var fl in remotefiles)
                    volumeIds[fl.File.Name] = restoredb.RegisterRemoteVolume(fl.File.Name, fl.FileType, RemoteVolumeState.Uploaded);

                //Record all blocksets and files needed
                using(var tr = restoredb.BeginTransaction())
                {
                    var filelistWork = (from n in filelists orderby n.Time select new RemoteVolume(n.File) as IRemoteVolume).ToList();
                    foreach(var entry in new AsyncDownloader(filelistWork, backend))
                        try
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(restoredb, null);
                                return;
                            }

                            using(var tmpfile = entry.TempFile)
                            {
                                if (entry.Hash != null && entry.Size > 0)
                                    restoredb.UpdateRemoteVolume(entry.Name, RemoteVolumeState.Verified, entry.Size, entry.Hash, tr);

                                var parsed = VolumeBase.ParseFilename(entry.Name);
                                // Create timestamped operations based on the file timestamp
                                var filesetid = restoredb.CreateFileset(volumeIds[entry.Name], parsed.Time, tr);
                                using(var filelistreader = new FilesetVolumeReader(parsed.CompressionModule, tmpfile, m_options))
                                    foreach(var fe in filelistreader.Files.Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path)))
                                    {
                                        try
                                        {
                                            if (fe.Type == FilelistEntryType.Folder)
                                            {
                                                restoredb.AddDirectoryEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else if (fe.Type == FilelistEntryType.File)
                                            {
                                                var blocksetid = restoredb.AddBlockset(fe.Hash, fe.Size, fe.BlocklistHashes, tr);
                                                restoredb.AddFileEntry(filesetid, fe.Path, fe.Time, blocksetid, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else if (fe.Type == FilelistEntryType.Symlink)
                                            {
                                                restoredb.AddSymlinkEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else
                                            {
                                                m_result.AddWarning(string.Format("Skipping file-entry with unknown type {0}: {1} ", fe.Type, fe.Path), null);
                                            }
                                        }
                                        catch (Exception ex)
                                        {
                                            m_result.AddWarning(string.Format("Failed to process file-entry: {0}", fe.Path), ex);
                                        }
                                    }
                            }
                        }
                        catch (Exception ex)
                        {
                            m_result.AddWarning(string.Format("Failed to process file: {0}", entry.Name), ex);
                            if (ex is System.Threading.ThreadAbortException)
                                throw;
                        }

                    using(new Logging.Timer("CommitUpdateFilesetFromRemote"))
                        tr.Commit();
                }

                //Grab all index files, and update the block table
                using(var tr = restoredb.BeginTransaction())
                {
                    var indexfiles =
                        from n in remotefiles
                        where n.FileType == RemoteVolumeType.Index
                        select new RemoteVolume(n.File) as IRemoteVolume;

                    foreach(var sf in new AsyncDownloader(indexfiles.ToList(), backend))
                        try
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(restoredb, null);
                                return;
                            }

                            using(var tmpfile = sf.TempFile)
                            {
                                if (sf.Hash != null && sf.Size > 0)
                                    restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Verified, sf.Size, sf.Hash, tr);

                                using(var svr = new IndexVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options, hashsize))
                                {
                                    Utility.VerifyParameters(restoredb, m_options);

                                    foreach(var a in svr.Volumes)
                                    {
                                        var volumeID = restoredb.GetRemoteVolumeID(a.Filename);
                                        //Add all block/volume mappings
                                        foreach(var b in a.Blocks)
                                            restoredb.UpdateBlock(b.Key, b.Value, volumeID, tr);

                                        restoredb.UpdateRemoteVolume(a.Filename, RemoteVolumeState.Verified, a.Length, a.Hash, tr);
                                        restoredb.AddIndexBlockLink(restoredb.GetRemoteVolumeID(sf.Name), volumeID, tr);
                                    }

                                    //If there are blocklists in the index file, update the blocklists
                                    foreach(var b in svr.BlockLists)
                                        restoredb.UpdateBlockset(b.Hash, b.Blocklist, tr);
                                }
                            }
                        }
                        catch (Exception ex)
                        {
                            //Not fatal
                            m_result.AddWarning(string.Format("Failed to process index file: {0}", sf.Name), ex);
                            if (ex is System.Threading.ThreadAbortException)
                                throw;
                        }

                    using(new Logging.Timer("CommitRecreatedDb"))
                        tr.Commit();

                    // TODO: In some cases, we can avoid downloading all index files,
                    // if we are lucky and pick the right ones
                }

                // We have now grabbed as much information as possible,
                // if we are still missing data, we must now fetch block files
                restoredb.FindMissingBlocklistHashes(hashsize, null);

                //We do this in three passes
                for(var i = 0; i < 3; i++)
                {
                    // Grab the list matching the pass type
                    var lst = restoredb.GetMissingBlockListVolumes(i).ToList();
                    foreach (var sf in new AsyncDownloader(lst, backend))
                        using (var tmpfile = sf.TempFile)
                        using (var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options))
                        using (var tr = restoredb.BeginTransaction())
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(restoredb, null);
                                return;
                            }

                            var volumeid = restoredb.GetRemoteVolumeID(sf.Name);

                            // Update the block table so we know about the block/volume map
                            foreach(var h in rd.Blocks)
                                restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr);

                            // Grab all known blocklists from the volume
                            foreach (var blocklisthash in restoredb.GetBlockLists(volumeid))
                                restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr);

                            // Update tables so we know if we are done
                            restoredb.FindMissingBlocklistHashes(hashsize, tr);

                            using(new Logging.Timer("CommitRestoredBlocklist"))
                                tr.Commit();

                            //At this point we can patch files with data from the block volume
                            if (blockprocessor != null)
                                blockprocessor(sf.Name, rd);
                        }
                }

                backend.WaitForComplete(restoredb, null);

                //All done, we must verify that we have all blocklist fully intact
                // if this fails, the db will not be deleted, so it can be used,
                // except to continue a backup
                restoredb.VerifyConsistency(null);
            }
        }
Example #25
        /// <summary>
        /// Creates a temporary verification file.
        /// </summary>
        /// <returns>The verification file.</returns>
        /// <param name="db">The database instance</param>
        /// <param name="stream">The stream to write to</param>
        public static void CreateVerificationFile(LocalDatabase db, System.IO.StreamWriter stream)
        {
            var s = new Newtonsoft.Json.JsonSerializer();

            s.Serialize(stream, db.GetRemoteVolumes().Cast<IRemoteVolume>().ToArray());
        }
Example #26
        /// <summary>This will be called when _revsToInsert fills up:</summary>
        private void InsertDownloads(IList<RevisionInternal> downloads)
        {
            Log.To.SyncPerf.I(TAG, "{0} inserting {1} revisions into db...", this, downloads.Count);
            Log.To.Sync.V(TAG, "{0} inserting {1} revisions...", this, downloads.Count);
            var time = DateTime.UtcNow;

            downloads.Sort(new RevisionComparer());

            if (!LocalDatabase.IsOpen)
            {
                return;
            }

            try {
                var success = LocalDatabase.RunInTransaction(() =>
                {
                    foreach (var rev in downloads)
                    {
                        var fakeSequence = rev.Sequence;
                        rev.Sequence     = 0L;
                        var history      = Database.ParseCouchDBRevisionHistory(rev.GetProperties());
                        if (history.Count == 0 && rev.Generation > 1)
                        {
                            Log.To.Sync.W(TAG, "{0} missing revision history in response for: {0}", this, rev);
                            LastError = new CouchbaseLiteException(StatusCode.UpStreamError);
                            RevisionFailed();
                            continue;
                        }

                        Log.To.Sync.V(TAG, String.Format("Inserting {0} {1}",
                                                         new SecureLogString(rev.DocID, LogMessageSensitivity.PotentiallyInsecure),
                                                         new LogJsonString(history)));

                        // Insert the revision:
                        try {
                            LocalDatabase.ForceInsert(rev, history, RemoteUrl);
                        } catch (CouchbaseLiteException e) {
                            if (e.Code == StatusCode.Forbidden)
                            {
                                Log.To.Sync.I(TAG, "{0} remote rev failed validation: {1}", this, rev);
                            }
                            else if (e.Code == StatusCode.AttachmentError)
                            {
                                // Revision with broken _attachments metadata (i.e. bogus revpos)
                                // should not stop replication. Warn and skip it.
                                Log.To.Sync.W(TAG, "{0} revision {1} has invalid attachment metadata: {2}",
                                              this, rev, new SecureLogJsonString(rev.GetAttachments(), LogMessageSensitivity.PotentiallyInsecure));
                            }
                            else if (e.Code == StatusCode.DbBusy)
                            {
                                Log.To.Sync.I(TAG, "Database is busy, will retry soon...");
                                // abort transaction; RunInTransaction will retry
                                return false;
                            }
                            else
                            {
                                Log.To.Sync.W(TAG, "{0} failed to write {1}: status={2}", this, rev, e.Code);
                                RevisionFailed();
                                LastError = e;
                                continue;
                            }
                        } catch (Exception e) {
                            throw Misc.CreateExceptionAndLog(Log.To.Sync, e, TAG,
                                                             "Error inserting downloads");
                        }

                        _pendingSequences.RemoveSequence(fakeSequence);
                    }

                    Log.To.Sync.V(TAG, "{0} finished inserting {1} revisions", this, downloads.Count);

                    return true;
                });

                Log.To.Sync.V(TAG, "Finished inserting {0} revisions. Success == {1}", downloads.Count, success);
            } catch (Exception e) {
                Log.To.Sync.E(TAG, "Exception inserting revisions, continuing...", e);
            }

            // Checkpoint:
            LastSequence = _pendingSequences.GetCheckpointedValue();

            var delta = (DateTime.UtcNow - time).TotalMilliseconds;

            Log.To.Sync.I(TAG, "Inserted {0} revs in {1} milliseconds", downloads.Count, delta);
            Log.To.SyncPerf.I(TAG, "Inserted {0} revs in {1} milliseconds", downloads.Count, delta);
            SafeAddToCompletedChangesCount(downloads.Count);
            PauseOrResume();
        }
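
The DbBusy branch depends on the RunInTransaction contract noted in the comments: returning false rolls the transaction back and the runner retries. A hedged sketch of what such a runner could look like; this is not the Couchbase Lite implementation:

    // Illustrative retry wrapper: body() returns true to commit, false to retry.
    static bool RunInTransactionWithRetry(Func<bool> body, int maxAttempts = 5)
    {
        for (var attempt = 1; attempt <= maxAttempts; attempt++)
        {
            // BEGIN TRANSACTION (database-specific, omitted here)
            if (body())
                return true; // COMMIT
            // ROLLBACK, then back off briefly before the next attempt
            System.Threading.Thread.Sleep(50 * attempt);
        }
        return false; // gave up after maxAttempts rollbacks
    }
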
Example #27
 public void SetDatabase(LocalDatabase db)
 {
     if (m_parent != null)
     {
         m_parent.SetDatabase(db);
     }
     else
     {
         lock(m_lock)
         {
             m_db = db;
             if (m_db != null)
                 db.SetResult(this);
         }
     }
 }
Example #28
        /// <summary>
        /// Uploads the verification file.
        /// </summary>
        /// <param name="backendurl">The backend url</param>
        /// <param name="options">The options to use</param>
        /// <param name="result">The result writer</param>
        /// <param name="db">The attached database</param>
        /// <param name="transaction">An optional transaction object</param>
        public static void UploadVerificationFile(string backendurl, Options options, IBackendWriter result, LocalDatabase db, System.Data.IDbTransaction transaction)
        {
            using (var backend = new BackendManager(backendurl, options, result, db))
                using (var tempfile = new Library.Utility.TempFile())
                {
                    var remotename = options.Prefix + "-verification.json";
                    using (var stream = new System.IO.StreamWriter(tempfile, false, System.Text.Encoding.UTF8))
                        FilelistProcessor.CreateVerificationFile(db, stream);

                    if (options.Dryrun)
                    {
                        result.AddDryrunMessage(string.Format("Would upload verification file: {0}, size: {1}", remotename, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(tempfile).Length)));
                    }
                    else
                    {
                        backend.PutUnencrypted(remotename, tempfile);
                        backend.WaitForComplete(db, transaction);
                    }
                }
        }
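
The uploaded file is plain JSON: an array of the database's remote volume records, each carrying at least Name, Size, and Hash. A hypothetical consumer that lists its contents; the VolumeRecord DTO below is an assumption shaped after those properties:

    // Hypothetical reader for the verification JSON produced above.
    class VolumeRecord
    {
        public string Name { get; set; }
        public long Size { get; set; }
        public string Hash { get; set; }
    }

    static void PrintVerificationFile(string path)
    {
        var volumes = Newtonsoft.Json.JsonConvert.DeserializeObject<VolumeRecord[]>(
            System.IO.File.ReadAllText(path));
        foreach (var v in volumes)
            Console.WriteLine("{0} ({1} bytes, hash {2})", v.Name, v.Size, v.Hash);
    }
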
Example #29
 public DatabaseController(LocalDatabase dataBase)
 {
     db = dataBase;
 }
Example #30
        /// <summary>
        /// Helper method that verifies uploaded volumes and updates their state in the database.
        /// Throws an error if there are issues with the remote storage
        /// </summary>
        /// <param name="backend">The backend instance to use</param>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
        {
            var rawlist = backend.List();
            var lookup  = new Dictionary<string, Volumes.IParsedVolume>();

            var remotelist = (from n in rawlist
                              let p = Volumes.VolumeBase.ParseFilename(n)
                              where p != null && p.Prefix == options.Prefix
                              select p).ToList();

            var otherlist = (from n in rawlist
                             let p = Volumes.VolumeBase.ParseFilename(n)
                             where p != null && p.Prefix != options.Prefix
                             select p).ToList();

            var unknownlist = (from n in rawlist
                               let p = Volumes.VolumeBase.ParseFilename(n)
                               where p == null
                               select n).ToList();

            var filesets = (from n in remotelist
                            where n.FileType == RemoteVolumeType.Files
                            orderby n.Time descending
                            select n).ToList();

            log.KnownFileCount   = remotelist.Count();
            log.KnownFileSize    = remotelist.Select(x => x.File.Size).Sum();
            log.UnknownFileCount = unknownlist.Count();
            log.UnknownFileSize  = unknownlist.Select(x => x.Size).Sum();
            log.BackupListCount  = filesets.Count;
            log.LastBackupDate   = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();

            if (backend is Library.Interface.IQuotaEnabledBackend)
            {
                log.TotalQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).TotalQuotaSpace;
                log.FreeQuotaSpace  = ((Library.Interface.IQuotaEnabledBackend)backend).FreeQuotaSpace;
            }

            log.AssignedQuotaSpace = options.QuotaSize;

            foreach (var s in remotelist)
            {
                lookup[s.File.Name] = s;
            }

            var missing     = new List <RemoteVolumeEntry>();
            var missingHash = new List <Tuple <long, RemoteVolumeEntry> >();
            var locallist   = database.GetRemoteVolumes();

            foreach (var i in locallist)
            {
                Volumes.IParsedVolume r;
                var remoteFound = lookup.TryGetValue(i.Name, out r);
                var correctSize = remoteFound && i.Size >= 0 && (i.Size == r.File.Size || r.File.Size < 0);

                lookup.Remove(i.Name);

                switch (i.State)
                {
                case RemoteVolumeState.Deleted:
                    if (remoteFound)
                    {
                        log.AddMessage(string.Format("ignoring remote file listed as {0}: {1}", i.State, i.Name));
                    }

                    break;

                case RemoteVolumeState.Temporary:
                case RemoteVolumeState.Deleting:
                    if (remoteFound)
                    {
                        log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                        backend.Delete(i.Name, i.Size, true);
                    }
                    else
                    {
                        log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
                        database.RemoveRemoteVolume(i.Name, null);
                    }
                    break;

                case RemoteVolumeState.Uploading:
                    if (remoteFound && correctSize && r.File.Size >= 0)
                    {
                        log.AddMessage(string.Format("promoting uploaded complete file from {0} to {2}: {1}", i.State, i.Name, RemoteVolumeState.Uploaded));
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Uploaded, i.Size, i.Hash);
                    }
                    else if (!remoteFound)
                    {
                        log.AddMessage(string.Format("scheduling missing file for deletion, currently listed as {0}: {1}", i.State, i.Name));
                        database.RemoveRemoteVolume(i.Name, null);
                        database.RegisterRemoteVolume(i.Name, i.Type, RemoteVolumeState.Deleting, null);
                    }
                    else
                    {
                        log.AddMessage(string.Format("removing incomplete remote file listed as {0}: {1}", i.State, i.Name));
                        backend.Delete(i.Name, i.Size, true);
                    }
                    break;

                case RemoteVolumeState.Uploaded:
                    if (!remoteFound)
                    {
                        missing.Add(i);
                    }
                    else if (correctSize)
                    {
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash);
                    }
                    else
                    {
                        missingHash.Add(new Tuple <long, RemoteVolumeEntry>(r.File.Size, i));
                    }

                    break;

                case RemoteVolumeState.Verified:
                    if (!remoteFound)
                    {
                        missing.Add(i);
                    }
                    else if (!correctSize)
                    {
                        missingHash.Add(new Tuple <long, RemoteVolumeEntry>(r.File.Size, i));
                    }

                    break;

                default:
                    log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null);
                    break;
                }

                backend.FlushDbMessages();
            }


            foreach (var i in missingHash)
            {
                log.AddWarning(string.Format("remote file {1} is listed as {0} with size {2} but should be {3}, please verify the sha256 hash \"{4}\"", i.Item2.State, i.Item2.Name, i.Item1, i.Item2.Size, i.Item2.Hash), null);
            }

            return(new RemoteAnalysisResult()
            {
                ParsedVolumes = remotelist,
                OtherVolumes = otherlist,
                ExtraVolumes = lookup.Values,
                MissingVolumes = missing,
                VerificationRequiredVolumes = missingHash.Select(x => x.Item2)
            });
        }
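A short consumer sketch for the result above: treat missing volumes as fatal and surface extra volumes as warnings. The control flow and message texts are assumptions; only the RemoteAnalysisResult members are taken from the example (requires System.Linq).

            // Hedged consumer sketch (messages are illustrative):
            var analysis = RemoteListAnalysis(backend, options, database, log);

            if (analysis.MissingVolumes.Any())
                throw new Exception(string.Format("Found {0} missing volume(s): {1}",
                    analysis.MissingVolumes.Count(),
                    string.Join(", ", analysis.MissingVolumes.Select(x => x.Name))));

            foreach (var v in analysis.ExtraVolumes)
                log.AddWarning(string.Format("Extra unknown file: {0}", v.File.Name), null);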
Example #31
        private void UploadChanges(IList <RevisionInternal> changes, IDictionary <string, object> revsDiffResults)
        {
            // Go through the list of local changes again, selecting the ones the destination server
            // said were missing and mapping them to a JSON dictionary in the form _bulk_docs wants:
            var docsToSend = new List <object> ();
            var revsToSend = new RevisionList();
            IDictionary <string, object> revResults = null;

            foreach (var rev in changes)
            {
                // Is this revision in the server's 'missing' list?
                if (revsDiffResults != null)
                {
                    revResults = revsDiffResults.Get(rev.DocID).AsDictionary <string, object>();
                    if (revResults == null)
                    {
                        continue;
                    }

                    var revs = revResults.Get("missing").AsList <string>();
                    if (revs == null || !revs.Any(id => id.Equals(rev.RevID.ToString())))
                    {
                        RemovePending(rev);
                        continue;
                    }
                }

                IDictionary <string, object> properties = null;
                RevisionInternal             loadedRev;
                try {
                    loadedRev = LocalDatabase.LoadRevisionBody(rev);
                    if (loadedRev == null)
                    {
                        throw Misc.CreateExceptionAndLog(Log.To.Sync, StatusCode.NotFound, TAG,
                                                         "Unable to load revision body");
                    }

                    properties = rev.GetProperties();
                } catch (Exception e1) {
                    Log.To.Sync.E(TAG, String.Format("Couldn't get local contents of {0}, marking revision failed",
                                                     rev), e1);
                    RevisionFailed();
                    continue;
                }

                if (properties.GetCast <bool> ("_removed"))
                {
                    RemovePending(rev);
                    continue;
                }

                var populatedRev = TransformRevision(loadedRev);
                var backTo       = revResults?.Get("possible_ancestors")?.AsList <RevisionID>();

                try {
                    var history = LocalDatabase.GetRevisionHistory(populatedRev, backTo);
                    if (history == null)
                    {
                        throw Misc.CreateExceptionAndLog(Log.To.Sync, StatusCode.DbError, TAG,
                                                         "Unable to load revision history");
                    }

                    properties["_revisions"] = TreeRevisionID.MakeRevisionHistoryDict(history);
                    populatedRev.SetPropertyForKey("_revisions", properties["_revisions"]);
                } catch (Exception e1) {
                    Log.To.Sync.E(TAG, "Error getting revision history, marking revision failed", e1);
                    RevisionFailed();
                    continue;
                }

                // Strip any attachments already known to the target db:
                if (properties.Get("_attachments") != null)
                {
                    // Look for the latest common ancestor and stub out older attachments:
                    var minRevPos = FindCommonAncestor(populatedRev, backTo);
                    try {
                        LocalDatabase.ExpandAttachments(populatedRev, minRevPos + 1, !_dontSendMultipart, false);
                    } catch (Exception ex) {
                        Log.To.Sync.E(TAG, "Error expanding attachments, marking revision failed", ex);
                        RevisionFailed();
                        continue;
                    }

                    properties = populatedRev.GetProperties();
                    if (!_dontSendMultipart && UploadMultipartRevision(populatedRev))
                    {
                        continue;
                    }
                }

                if (properties == null || !properties.ContainsKey("_id"))
                {
                    throw Misc.CreateExceptionAndLog(Log.To.Sync, StatusCode.BadParam, TAG,
                                                     "properties must contain a document _id");
                }

                // Add the _revisions list:
                revsToSend.Add(rev);

                //now add it to the docs to send
                docsToSend.Add(properties);
            }

            UploadBulkDocs(docsToSend, revsToSend);
        }
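For orientation, MakeRevisionHistoryDict above builds the standard CouchDB-style _revisions value: the generation number of the newest revision plus the revision hashes, newest first. A minimal sketch of that shape follows; the helper itself is hypothetical, only the JSON layout is the standard one (requires System.Linq).

            // Produces e.g. { "start": 3, "ids": ["beef01...", "cafe02...", "f00d03..."] }
            // from rev IDs like "3-beef01...", "2-cafe02...", "1-f00d03..." (newest first).
            static IDictionary<string, object> RevisionHistorySketch(IList<string> revIdsNewestFirst)
            {
                return new Dictionary<string, object> {
                    { "start", int.Parse(revIdsNewestFirst[0].Split('-')[0]) },
                    { "ids", revIdsNewestFirst.Select(id => id.Split('-')[1]).ToList() }
                };
            }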
Example #32
//JAVA TO C# CONVERTER WARNING: 'final' parameters are ignored unless the option to convert to C# 7.2 'in' parameters is selected:
//ORIGINAL LINE: public EnterpriseReadReplicaEditionModule(final org.neo4j.graphdb.factory.module.PlatformModule platformModule, final org.neo4j.causalclustering.discovery.DiscoveryServiceFactory discoveryServiceFactory, org.neo4j.causalclustering.identity.MemberId myself)
        public EnterpriseReadReplicaEditionModule(PlatformModule platformModule, DiscoveryServiceFactory discoveryServiceFactory, MemberId myself)
        {
            LogService logging = platformModule.Logging;

            IoLimiterConflict = new ConfigurableIOLimiter(platformModule.Config);
            platformModule.JobScheduler.TopLevelGroupName = "ReadReplica " + myself;

            Dependencies          dependencies = platformModule.Dependencies;
            Config                config       = platformModule.Config;
            FileSystemAbstraction fileSystem   = platformModule.FileSystem;
            PageCache             pageCache    = platformModule.PageCache;
//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
//ORIGINAL LINE: final org.neo4j.io.layout.DatabaseLayout databaseLayout = platformModule.storeLayout.databaseLayout(config.get(org.neo4j.graphdb.factory.GraphDatabaseSettings.active_database));
            DatabaseLayout databaseLayout = platformModule.StoreLayout.databaseLayout(config.Get(GraphDatabaseSettings.active_database));

            LifeSupport life = platformModule.Life;

            ThreadToTransactionBridgeConflict = dependencies.SatisfyDependency(new ThreadToStatementContextBridge(GetGlobalAvailabilityGuard(platformModule.Clock, logging, platformModule.Config)));
            this.AccessCapabilityConflict     = new ReadOnly();

            WatcherServiceFactoryConflict = dir => CreateFileSystemWatcherService(fileSystem, dir, logging, platformModule.JobScheduler, config, FileWatcherFileNameFilter());
            dependencies.SatisfyDependencies(WatcherServiceFactoryConflict);

            ReadReplicaLockManager emptyLockManager = new ReadReplicaLockManager();

            LocksSupplierConflict = () => emptyLockManager;
            StatementLocksFactoryProviderConflict = locks => (new StatementLocksFactorySelector(locks, config, logging)).select();

            IdContextFactoryConflict = IdContextFactoryBuilder.of(new EnterpriseIdTypeConfigurationProvider(config), platformModule.JobScheduler).withFileSystem(fileSystem).build();

            TokenHoldersProviderConflict = databaseName => new TokenHolders(new DelegatingTokenHolder(new ReadOnlyTokenCreator(), Org.Neo4j.Kernel.impl.core.TokenHolder_Fields.TYPE_PROPERTY_KEY), new DelegatingTokenHolder(new ReadOnlyTokenCreator(), Org.Neo4j.Kernel.impl.core.TokenHolder_Fields.TYPE_LABEL), new DelegatingTokenHolder(new ReadOnlyTokenCreator(), Org.Neo4j.Kernel.impl.core.TokenHolder_Fields.TYPE_RELATIONSHIP_TYPE));

            File contextDirectory = platformModule.StoreLayout.storeDirectory();

            life.Add(dependencies.SatisfyDependency(new KernelData(fileSystem, pageCache, contextDirectory, config, platformModule.DataSourceManager)));

            HeaderInformationFactoryConflict = TransactionHeaderInformationFactory.DEFAULT;

            SchemaWriteGuardConflict = () =>
            {
            };

            TransactionStartTimeoutConflict = config.Get(GraphDatabaseSettings.transaction_start_timeout).toMillis();

            ConstraintSemanticsConflict = new EnterpriseConstraintSemantics();

            PublishEditionInfo(dependencies.ResolveDependency(typeof(UsageData)), platformModule.DatabaseInfo, config);
            CommitProcessFactoryConflict = ReadOnly();

            ConnectionTrackerConflict = dependencies.SatisfyDependency(CreateConnectionTracker());

            _logProvider = platformModule.Logging.InternalLogProvider;
            LogProvider userLogProvider = platformModule.Logging.UserLogProvider;

            _logProvider.getLog(this.GetType()).info(string.Format("Generated new id: {0}", myself));

            RemoteMembersResolver hostnameResolver = chooseResolver(config, platformModule.Logging);

            ConfigureDiscoveryService(discoveryServiceFactory, dependencies, config, _logProvider);

            _topologyService = discoveryServiceFactory.ReadReplicaTopologyService(config, _logProvider, platformModule.JobScheduler, myself, hostnameResolver, ResolveStrategy(config, _logProvider));

            life.Add(dependencies.SatisfyDependency(_topologyService));

            // We need to satisfy the dependency here to keep users of it, such as BoltKernelExtension, happy.
            dependencies.SatisfyDependency(SslPolicyLoader.create(config, _logProvider));

            DuplexPipelineWrapperFactory pipelineWrapperFactory = PipelineWrapperFactory();
            PipelineWrapper serverPipelineWrapper       = pipelineWrapperFactory.ForServer(config, dependencies, _logProvider, CausalClusteringSettings.ssl_policy);
            PipelineWrapper clientPipelineWrapper       = pipelineWrapperFactory.ForClient(config, dependencies, _logProvider, CausalClusteringSettings.ssl_policy);
            PipelineWrapper backupServerPipelineWrapper = pipelineWrapperFactory.ForServer(config, dependencies, _logProvider, OnlineBackupSettings.ssl_policy);

            NettyPipelineBuilderFactory clientPipelineBuilderFactory       = new NettyPipelineBuilderFactory(clientPipelineWrapper);
            NettyPipelineBuilderFactory serverPipelineBuilderFactory       = new NettyPipelineBuilderFactory(serverPipelineWrapper);
            NettyPipelineBuilderFactory backupServerPipelineBuilderFactory = new NettyPipelineBuilderFactory(backupServerPipelineWrapper);

            SupportedProtocolCreator                 supportedProtocolCreator   = new SupportedProtocolCreator(config, _logProvider);
            ApplicationSupportedProtocols            supportedCatchupProtocols  = supportedProtocolCreator.CreateSupportedCatchupProtocol();
            ICollection <ModifierSupportedProtocols> supportedModifierProtocols = supportedProtocolCreator.CreateSupportedModifierProtocols();

            ApplicationProtocolRepository applicationProtocolRepository = new ApplicationProtocolRepository(Protocol_ApplicationProtocols.values(), supportedCatchupProtocols);
            ModifierProtocolRepository    modifierProtocolRepository    = new ModifierProtocolRepository(Org.Neo4j.causalclustering.protocol.Protocol_ModifierProtocols.values(), supportedModifierProtocols);

            System.Func <CatchUpResponseHandler, ChannelInitializer <SocketChannel> > channelInitializer = handler =>
            {
                ProtocolInstallerRepository <ProtocolInstaller.Orientation.Client> protocolInstallerRepository = new ProtocolInstallerRepository <ProtocolInstaller.Orientation.Client>(singletonList(new CatchupProtocolClientInstaller.Factory(clientPipelineBuilderFactory, _logProvider, handler)), ModifierProtocolInstaller.allClientInstallers);
                Duration handshakeTimeout = config.Get(CausalClusteringSettings.handshake_timeout);
                return(new HandshakeClientInitializer(applicationProtocolRepository, modifierProtocolRepository, protocolInstallerRepository, clientPipelineBuilderFactory, handshakeTimeout, _logProvider, userLogProvider));
            };

            long inactivityTimeoutMs = config.Get(CausalClusteringSettings.catch_up_client_inactivity_timeout).toMillis();

            CatchUpClient catchUpClient = life.Add(new CatchUpClient(_logProvider, Clocks.systemClock(), inactivityTimeoutMs, channelInitializer));

//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
//ORIGINAL LINE: final System.Func<org.neo4j.kernel.internal.DatabaseHealth> databaseHealthSupplier = () -> platformModule.dataSourceManager.getDataSource().getDependencyResolver().resolveDependency(org.neo4j.kernel.internal.DatabaseHealth.class);
            System.Func <DatabaseHealth> databaseHealthSupplier = () => platformModule.DataSourceManager.DataSource.DependencyResolver.resolveDependency(typeof(DatabaseHealth));

            StoreFiles storeFiles = new StoreFiles(fileSystem, pageCache);
            LogFiles   logFiles   = BuildLocalDatabaseLogFiles(platformModule, fileSystem, databaseLayout, config);

            LocalDatabase localDatabase = new LocalDatabase(databaseLayout, storeFiles, logFiles, platformModule.DataSourceManager, databaseHealthSupplier, GetGlobalAvailabilityGuard(platformModule.Clock, platformModule.Logging, platformModule.Config), _logProvider);

            System.Func <TransactionCommitProcess> writableCommitProcess = () => new TransactionRepresentationCommitProcess(localDatabase.DataSource().DependencyResolver.resolveDependency(typeof(TransactionAppender)), localDatabase.DataSource().DependencyResolver.resolveDependency(typeof(StorageEngine)));

            LifeSupport txPulling    = new LifeSupport();
            int         maxBatchSize = config.Get(CausalClusteringSettings.read_replica_transaction_applier_batch_size);

            CommandIndexTracker commandIndexTracker = platformModule.Dependencies.satisfyDependency(new CommandIndexTracker());
            BatchingTxApplier   batchingTxApplier   = new BatchingTxApplier(maxBatchSize, () => localDatabase.DataSource().DependencyResolver.resolveDependency(typeof(TransactionIdStore)), writableCommitProcess, platformModule.Monitors, platformModule.Tracers.pageCursorTracerSupplier, platformModule.VersionContextSupplier, commandIndexTracker, _logProvider);

            TimerService timerService = new TimerService(platformModule.JobScheduler, _logProvider);

            ExponentialBackoffStrategy storeCopyBackoffStrategy = new ExponentialBackoffStrategy(1, config.Get(CausalClusteringSettings.store_copy_backoff_max_wait).toMillis(), TimeUnit.MILLISECONDS);

            RemoteStore remoteStore = new RemoteStore(platformModule.Logging.InternalLogProvider, fileSystem, platformModule.PageCache, new StoreCopyClient(catchUpClient, platformModule.Monitors, _logProvider, storeCopyBackoffStrategy), new TxPullClient(catchUpClient, platformModule.Monitors), new TransactionLogCatchUpFactory(), config, platformModule.Monitors);

            CopiedStoreRecovery copiedStoreRecovery = new CopiedStoreRecovery(config, platformModule.KernelExtensionFactories, platformModule.PageCache);

            txPulling.Add(copiedStoreRecovery);

            CompositeSuspendable servicesToStopOnStoreCopy = new CompositeSuspendable();

            StoreCopyProcess storeCopyProcess = new StoreCopyProcess(fileSystem, pageCache, localDatabase, copiedStoreRecovery, remoteStore, _logProvider);

            ConnectToRandomCoreServerStrategy defaultStrategy = new ConnectToRandomCoreServerStrategy();

            defaultStrategy.Inject(_topologyService, config, _logProvider, myself);

            UpstreamDatabaseStrategySelector upstreamDatabaseStrategySelector = CreateUpstreamDatabaseStrategySelector(myself, config, _logProvider, _topologyService, defaultStrategy);

            CatchupPollingProcess catchupProcess = new CatchupPollingProcess(_logProvider, localDatabase, servicesToStopOnStoreCopy, catchUpClient, upstreamDatabaseStrategySelector, timerService, config.Get(CausalClusteringSettings.pull_interval).toMillis(), batchingTxApplier, platformModule.Monitors, storeCopyProcess, databaseHealthSupplier, _topologyService);

            dependencies.SatisfyDependencies(catchupProcess);

            txPulling.Add(batchingTxApplier);
            txPulling.Add(catchupProcess);
            txPulling.Add(new WaitForUpToDateStore(catchupProcess, _logProvider));

            ExponentialBackoffStrategy retryStrategy = new ExponentialBackoffStrategy(1, 30, TimeUnit.SECONDS);

            life.Add(new ReadReplicaStartupProcess(remoteStore, localDatabase, txPulling, upstreamDatabaseStrategySelector, retryStrategy, _logProvider, platformModule.Logging.UserLogProvider, storeCopyProcess, _topologyService));

            CheckPointerService         checkPointerService  = new CheckPointerService(() => localDatabase.DataSource().DependencyResolver.resolveDependency(typeof(CheckPointer)), platformModule.JobScheduler, Group.CHECKPOINT);
            RegularCatchupServerHandler catchupServerHandler = new RegularCatchupServerHandler(platformModule.Monitors, _logProvider, localDatabase.StoreId, localDatabase.DataSource, localDatabase.IsAvailable, fileSystem, null, checkPointerService);

            InstalledProtocolHandler installedProtocolHandler = new InstalledProtocolHandler();               // TODO: hook into a procedure
            Server catchupServer = (new CatchupServerBuilder(catchupServerHandler)).serverHandler(installedProtocolHandler).catchupProtocols(supportedCatchupProtocols).modifierProtocols(supportedModifierProtocols).pipelineBuilder(serverPipelineBuilderFactory).userLogProvider(userLogProvider).debugLogProvider(_logProvider).listenAddress(config.Get(transaction_listen_address)).serverName("catchup-server").build();

            TransactionBackupServiceProvider transactionBackupServiceProvider = new TransactionBackupServiceProvider(_logProvider, userLogProvider, supportedCatchupProtocols, supportedModifierProtocols, backupServerPipelineBuilderFactory, catchupServerHandler, installedProtocolHandler);
            Optional <Server> backupCatchupServer = transactionBackupServiceProvider.ResolveIfBackupEnabled(config);

            servicesToStopOnStoreCopy.Add(catchupServer);
            backupCatchupServer.ifPresent(servicesToStopOnStoreCopy.add);

            life.Add(catchupServer);                 // must start last and stop first, since it handles external requests
            backupCatchupServer.ifPresent(life.add);
        }
Example #33
        /// <summary>
        /// Load user submissions information to currentUser
        /// </summary>
        /// <param name="user">Username to load</param>
        public void LoadUserSub(string user)
        {
            //if 'user' is already loaded then do nothing
            if (currentUser != null && currentUser.uname == user)
            {
                return;
            }
            if (!LocalDatabase.ContainsUser(user))
            {
                return;
            }

            worldRanklist.ClearObjects();
            lastSubmissions1.ClearObjects();
            Interactivity.progTracker.ShowUserInfo(null);

            try
            {
                Interactivity.SetStatus();

                //if current user is default user then get it from LocalDatabase
                if (user == RegistryAccess.DefaultUsername)
                {
                    currentUser = LocalDatabase.DefaultUser;
                }
                else
                {
                    //load current user from json data stored in the documents folder
                    string file = LocalDirectory.GetUserSubPath(user);
                    string json = File.ReadAllText(file);
                    currentUser = JsonConvert.DeserializeObject <UserInfo>(json);
                }

                //if no data could be found, create a new instance
                if (currentUser == null)
                {
                    currentUser = new UserInfo(user);
                    if (user == RegistryAccess.DefaultUsername)
                    {
                        LocalDatabase.DefaultUser = currentUser;
                    }
                }

                //process loaded data
                //-> this is very important
                currentUser.Process();

                //if no previous data exist
                if (currentUser.LastSID == 0)
                {
                    DownloadUserSubs(user);
                }
                else
                {
                    //show list
                    ShowDataByTab();
                }

                //select user
                SelectUsername(user);
            }
            catch (Exception ex)
            {
                Interactivity.SetStatus("Error while loading user submissions.");
                Logger.Add(ex.Message, "User Statistics | LoadUserSub(string user)");
            }
        }
Example #34
        private void worldRanklist_FormatCell(object sender, BrightIdeasSoftware.FormatCellEventArgs e)
        {
            if (e.Model == null)
            {
                return;
            }

            //change back-color of known users
            UserRanklist js = (UserRanklist)e.Model;

            if (js.username == RegistryAccess.DefaultUsername)
            {
                for (int i = 0; i < e.Item.SubItems.Count; ++i)
                {
                    e.Item.SubItems[i].BackColor = Color.Turquoise;
                }
            }
            else if (LocalDatabase.ContainsUser(js.username))
            {
                for (int i = 0; i < e.Item.SubItems.Count; ++i)
                {
                    e.Item.SubItems[i].BackColor = Color.LightBlue;
                }
            }

            //format other cells
            string    font  = "Segoe UI";
            float     size  = 9.0F;
            FontStyle style = FontStyle.Regular;
            Color     fore  = Color.Black;

            if (e.Column == rankRANK)
            {
                font = "Consolas";
                fore = Color.Teal;
            }
            else if (e.Column == usernameRANK)
            {
                fore = Color.Blue;
            }
            else if (e.Column == nameRANK)
            {
                font = "Segoe UI Semibold";
            }
            else if (e.Column == acRANK)
            {
                font = "Consolas";
                fore = Color.Navy;
            }
            else if (e.Column == nosRANK)
            {
                font = "Consolas";
                fore = Color.Maroon;
            }
            else
            {
                return;
            }

            e.SubItem.ForeColor = fore;
            e.SubItem.Font      = new Font(font, size, style);
        }
Example #35
            public bool FlushDbMessages(LocalDatabase db, System.Data.IDbTransaction transaction)
            {
                List<IDbEntry> entries;
                lock(m_dbqueuelock)
                    if (m_dbqueue.Count == 0)
                        return false;
                    else
                    {
                        entries = m_dbqueue;
                        m_dbqueue = new List<IDbEntry>();
                    }

                // collect removed volumes for final db cleanup.
                HashSet<string> volsRemoved = new HashSet<string>();

                //As we replace the list, we can now freely access the elements without locking
                foreach(var e in entries)
                    if (e is DbOperation)
                        db.LogRemoteOperation(((DbOperation)e).Action, ((DbOperation)e).File, ((DbOperation)e).Result, transaction);
                    else if (e is DbUpdate && ((DbUpdate)e).State == RemoteVolumeState.Deleted)
                    {
                        db.UpdateRemoteVolume(((DbUpdate)e).Remotename, RemoteVolumeState.Deleted, ((DbUpdate)e).Size, ((DbUpdate)e).Hash, true, transaction);
                        volsRemoved.Add(((DbUpdate)e).Remotename);
                    }
                    else if (e is DbUpdate)
                        db.UpdateRemoteVolume(((DbUpdate)e).Remotename, ((DbUpdate)e).State, ((DbUpdate)e).Size, ((DbUpdate)e).Hash, transaction);
                    else if (e is DbRename)
                        db.RenameRemoteFile(((DbRename)e).Oldname, ((DbRename)e).Newname, transaction);
                    else if (e != null)
                        m_stats.AddError(string.Format("Queue had element of type: {0}, {1}", e.GetType(), e.ToString()), null);

                // Finally remove volumes from DB.
                if (volsRemoved.Count > 0)
                    db.RemoveRemoteVolumes(volsRemoved);

                return true;
            }
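The drain above has a producer counterpart somewhere on the worker thread: entries are appended under the same lock and picked up on the next flush. A sketch of what that side presumably looks like; the method and the settable DbUpdate members are assumptions, only the member names visible above are reused.

                // Hypothetical producer side (signature assumed, not from the source):
                private void QueueUpdate(string remotename, RemoteVolumeState state, long size, string hash)
                {
                    lock (m_dbqueuelock)
                        m_dbqueue.Add(new DbUpdate { Remotename = remotename, State = state, Size = size, Hash = hash });
                }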
Example #36
 public NonFictionImporter(LocalDatabase localDatabase, BitArray existingLibgenIds, NonFictionBook lastModifiedNonFictionBook)
     : base(localDatabase, existingLibgenIds, TableDefinitions.NonFiction)
 {
     lastModifiedDateTime = lastModifiedNonFictionBook.LastModifiedDateTime;
     lastModifiedLibgenId = lastModifiedNonFictionBook.LibgenId;
 }
        /// <summary>
        /// Run the recreate procedure
        /// </summary>
        /// <param name="dbparent">The database to restore into</param>
        /// <param name="updating">True if this is an update call, false otherwise</param>
        /// <param name="filter">Filters the file entries within a filelist, to prevent downloading unwanted data</param>
        /// <param name="filelistfilter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param>
        /// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be used to recover data blocks while processing blocklists</param>
        internal void DoRun(LocalDatabase dbparent, bool updating, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Recreate_Running);

            //We build a local database in steps.
            using(var restoredb = new LocalRecreateDatabase(dbparent, m_options))
            using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, restoredb))
            {
                restoredb.RepairInProgress = true;

                var volumeIds = new Dictionary<string, long>();

                var rawlist = backend.List();
		
                //First step is to examine the remote storage to see what
                // kind of data we can find
                var remotefiles =
                    (from x in rawlist
                     let n = VolumeBase.ParseFilename(x)
                     where n != null && n.Prefix == m_options.Prefix
                     select n).ToArray(); //ToArray() ensures that we do not remote-request it multiple times

                if (remotefiles.Length == 0)
                {
                    if (rawlist.Count == 0)
                        throw new Exception("No files were found at the remote location, perhaps the target url is incorrect?");
                    else
                    {
                        var tmp =
                            (from x in rawlist
                             let n = VolumeBase.ParseFilename(x)
                             where n != null
                             select n.Prefix).ToArray();
                
                        var types = tmp.Distinct().ToArray();
                        if (tmp.Length == 0)
                            throw new Exception(string.Format("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count));
                        else if (types.Length == 1)
                            throw new Exception(string.Format("Found {0} parse-able files with the prefix {1}, did you forget to set the backup-prefix?", tmp.Length, types[0]));
                        else
                            throw new Exception(string.Format("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup-prefix?", tmp.Length, rawlist.Count, string.Join(", ", types)));
                    }
                }

                //Then we select the filelist we should work with,
                // and create the filelist table to fit
                IEnumerable<IParsedVolume> filelists =
                    from n in remotefiles
                    where n.FileType == RemoteVolumeType.Files
                    orderby n.Time descending
                    select n;

                if (filelists.Count() <= 0)
                    throw new Exception("No filelists found on the remote destination");
                
                if (filelistfilter != null)
                    filelists = filelistfilter(filelists).Select(x => x.Value).ToArray();

                if (filelists.Count() <= 0)
                    throw new Exception("No filelists matched the given filelist filter");

                // If we are updating, all files should be accounted for
                foreach(var fl in remotefiles)
                    volumeIds[fl.File.Name] = updating ? restoredb.GetRemoteVolumeID(fl.File.Name) : restoredb.RegisterRemoteVolume(fl.File.Name, fl.FileType, fl.File.Size, RemoteVolumeState.Uploaded);

                var hasUpdatedOptions = false;

                if (updating)
                {
                    Utility.UpdateOptionsFromDb(restoredb, m_options);
                    Utility.VerifyParameters(restoredb, m_options);
                }

                //Record all blocksets and files needed
                using(var tr = restoredb.BeginTransaction())
                {
                    var filelistWork = (from n in filelists orderby n.Time select new RemoteVolume(n.File) as IRemoteVolume).ToList();
                    m_result.AddMessage(string.Format("Rebuild database started, downloading {0} filelists", filelistWork.Count));

                    var progress = 0;

                    // Register the files we are working with, if not already updated
                    if (updating)
                    {
                        foreach(var n in filelists)
                            if (volumeIds[n.File.Name] == -1)
                                volumeIds[n.File.Name] = restoredb.RegisterRemoteVolume(n.File.Name, n.FileType, RemoteVolumeState.Uploaded, n.File.Size, new TimeSpan(0), tr);
                    }
                                
                    var isFirstFilelist = true;
                    var blocksize = m_options.Blocksize;
                    var hashes_pr_block = blocksize / m_options.BlockhashSize;

                    foreach(var entry in new AsyncDownloader(filelistWork, backend))
                        try
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(restoredb, null);
                                return;
                            }    
                        
                            progress++;
                            if (filelistWork.Count == 1 && m_options.RepairOnlyPaths)
                                m_result.OperationProgressUpdater.UpdateProgress(0.5f);
                            else
                                m_result.OperationProgressUpdater.UpdateProgress(((float)progress / filelistWork.Count()) * (m_options.RepairOnlyPaths ? 1f : 0.2f));

                            using(var tmpfile = entry.TempFile)
                            {
                                isFirstFilelist = false;

                                if (entry.Hash != null && entry.Size > 0)
                                    restoredb.UpdateRemoteVolume(entry.Name, RemoteVolumeState.Verified, entry.Size, entry.Hash, tr);

                                var parsed = VolumeBase.ParseFilename(entry.Name);

                                if (!hasUpdatedOptions && !updating) 
                                {
                                    VolumeReaderBase.UpdateOptionsFromManifest(parsed.CompressionModule, tmpfile, m_options);
                                    hasUpdatedOptions = true;
                                    // Recompute the cached sizes
                                    blocksize = m_options.Blocksize;
                                    hashes_pr_block = blocksize / m_options.BlockhashSize;
                                }


                                // Create timestamped operations based on the file timestamp
                                var filesetid = restoredb.CreateFileset(volumeIds[entry.Name], parsed.Time, tr);
                                using(var filelistreader = new FilesetVolumeReader(parsed.CompressionModule, tmpfile, m_options))
                                    foreach(var fe in filelistreader.Files.Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path)))
                                    {
                                        try
                                        {
                                            if (fe.Type == FilelistEntryType.Folder)
                                            {
                                                restoredb.AddDirectoryEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else if (fe.Type == FilelistEntryType.File)
                                            {
                                                var expectedblocks = (fe.Size + blocksize - 1)  / blocksize;
                                                var expectedblocklisthashes = (expectedblocks + hashes_pr_block - 1) / hashes_pr_block;
                                                if (expectedblocks <= 1) expectedblocklisthashes = 0;

                                                var blocksetid = restoredb.AddBlockset(fe.Hash, fe.Size, fe.BlocklistHashes, expectedblocklisthashes, tr);
                                                restoredb.AddFileEntry(filesetid, fe.Path, fe.Time, blocksetid, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else if (fe.Type == FilelistEntryType.Symlink)
                                            {
                                                restoredb.AddSymlinkEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else
                                            {
                                                m_result.AddWarning(string.Format("Skipping file-entry with unknown type {0}: {1} ", fe.Type, fe.Path), null);
                                            }
                                        }
                                        catch (Exception ex)
                                        {
                                            m_result.AddWarning(string.Format("Failed to process file-entry: {0}", fe.Path), ex);
                                        }
                                    }
                            }
                        }
                        catch (Exception ex)
                        {
                            m_result.AddWarning(string.Format("Failed to process file: {0}", entry.Name), ex);
                            if (ex is System.Threading.ThreadAbortException)
                                throw;

                            if (isFirstFilelist && ex is System.Security.Cryptography.CryptographicException)
                                throw;
                        }

                    //Make sure we write the config
                    if (!updating)
                        Utility.VerifyParameters(restoredb, m_options, tr);

                    using(new Logging.Timer("CommitUpdateFilesetFromRemote"))
                        tr.Commit();
                }
            
                if (!m_options.RepairOnlyPaths)
                {
                    var hashalg = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
                    if (hashalg == null)
                        throw new Exception(Strings.Common.InvalidHashAlgorithm(m_options.BlockHashAlgorithm));
                    var hashsize = hashalg.HashSize / 8;

                    //Grab all index files, and update the block table
                    using(var tr = restoredb.BeginTransaction())
                    {
                        var indexfiles = (from n in remotefiles
                                          where n.FileType == RemoteVolumeType.Index
                                          select new RemoteVolume(n.File) as IRemoteVolume).ToList();

                        m_result.AddMessage(string.Format("Filelists restored, downloading {0} index files", indexfiles.Count));

                        var progress = 0;
                                    
                        foreach(var sf in new AsyncDownloader(indexfiles, backend))
                            try
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(restoredb, null);
                                    return;
                                }

                                progress++;
                                m_result.OperationProgressUpdater.UpdateProgress((((float)progress / indexfiles.Count) * 0.5f) + 0.2f);

                                using(var tmpfile = sf.TempFile)
                                {
                                    if (sf.Hash != null && sf.Size > 0)
                                        restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Verified, sf.Size, sf.Hash, tr);
                
                                    using(var svr = new IndexVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options, hashsize))
                                    {
                                        foreach(var a in svr.Volumes)
                                        {
                                            var filename = a.Filename;
                                            var volumeID = restoredb.GetRemoteVolumeID(filename);

                                            // No such file
                                            if (volumeID < 0)
                                                volumeID = ProbeForMatchingFilename(ref filename, restoredb);

                                            // Still broken, register a missing item
                                            if (volumeID < 0)
                                            {
                                                var p = VolumeBase.ParseFilename(filename);
                                                if (p == null)
                                                    throw new Exception(string.Format("Unable to parse filename: {0}", filename));
                                                m_result.AddError(string.Format("Remote file referenced as {0}, but not found in list, registering a missing remote file", filename), null);
                                                volumeID = restoredb.RegisterRemoteVolume(filename, p.FileType, RemoteVolumeState.Verified, tr);
                                            }
                                            
                                            //Add all block/volume mappings
                                            foreach(var b in a.Blocks)
                                                restoredb.UpdateBlock(b.Key, b.Value, volumeID, tr);

                                            restoredb.UpdateRemoteVolume(filename, RemoteVolumeState.Verified, a.Length, a.Hash, tr);
                                            restoredb.AddIndexBlockLink(restoredb.GetRemoteVolumeID(sf.Name), volumeID, tr);
                                        }
                                
                                        //If there are blocklists in the index file, update the blocklists
                                        foreach(var b in svr.BlockLists)
                                            restoredb.UpdateBlockset(b.Hash, b.Blocklist, tr);
                                    }
                                }
                            }
                            catch (Exception ex)
                            {
                                //Not fatal
                                m_result.AddWarning(string.Format("Failed to process index file: {0}", sf.Name), ex);
                                if (ex is System.Threading.ThreadAbortException)
                                    throw;
                            }

                        using(new Logging.Timer("CommitRecreatedDb"))
                            tr.Commit();
                    
                        // TODO: In some cases, we can avoid downloading all index files, 
                        // if we are lucky and pick the right ones
                    }

                    // We have now grabbed as much information as possible,
                    // if we are still missing data, we must now fetch block files
                    restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, null);
                
                    //We do this in three passes
                    for(var i = 0; i < 3; i++)
                    {
                        // Grab the list matching the pass type
                        var lst = restoredb.GetMissingBlockListVolumes(i, m_options.Blocksize, hashsize).ToList();
                        if (lst.Count > 0)
                        {
                            switch (i)
                            {
                                case 0:
                                    if (m_options.Verbose)
                                        m_result.AddVerboseMessage("Processing required {0} blocklist volumes: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                    else
                                        m_result.AddMessage(string.Format("Processing required {0} blocklist volumes", lst.Count));
                                    break;
                                case 1:
                                    if (m_options.Verbose)
                                        m_result.AddVerboseMessage("Probing {0} candidate blocklist volumes: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                    else
                                        m_result.AddMessage(string.Format("Probing {0} candidate blocklist volumes", lst.Count));
                                    break;
                                default:
                                    if (m_options.Verbose)
                                        m_result.AddVerboseMessage("Processing all of the {0} volumes for blocklists: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                    else
                                        m_result.AddMessage(string.Format("Processing all of the {0} volumes for blocklists", lst.Count));
                                    break;
                            }
                        }

                        var progress = 0;
                        foreach(var sf in new AsyncDownloader(lst, backend))
                            using(var tmpfile = sf.TempFile)
                            using(var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options))
                            using(var tr = restoredb.BeginTransaction())
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(restoredb, null);
                                    return;
                                }    
                            
                                progress++;
                                m_result.OperationProgressUpdater.UpdateProgress((((float)progress / lst.Count) * 0.1f) + 0.7f + (i * 0.1f));

                                var volumeid = restoredb.GetRemoteVolumeID(sf.Name);

                                restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Uploaded, sf.Size, sf.Hash, tr);
                            
                                // Update the block table so we know about the block/volume map
                                foreach(var h in rd.Blocks)
                                    restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr);
                            
                                // Grab all known blocklists from the volume
                                foreach(var blocklisthash in restoredb.GetBlockLists(volumeid))
                                    restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr);
    
                                // Update tables so we know if we are done
                                restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, tr);
                        
                                using(new Logging.Timer("CommitRestoredBlocklist"))
                                    tr.Commit();
    
                                //At this point we can patch files with data from the block volume
                                if (blockprocessor != null)
                                    blockprocessor(sf.Name, rd);
                            }
                    }
                }
                
                backend.WaitForComplete(restoredb, null);

                m_result.AddMessage("Recreate completed, verifying the database consistency");

                //All done, we must verify that we have all blocklist fully intact
                // if this fails, the db will not be deleted, so it can be used,
                // except to continue a backup
                restoredb.VerifyConsistency(null, m_options.Blocksize, m_options.BlockhashSize);

                m_result.AddMessage("Recreate completed, and consistency checks completed, marking database as complete");

                restoredb.RepairInProgress = false;
            }
        }
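A worked example of the ceiling-division arithmetic used for expectedblocks and expectedblocklisthashes in DoRun above; the concrete sizes below are illustrative only.

                // Illustrative numbers (not from the source):
                long blocksize = 102400;                 // 100 KiB blocks
                long hashes_pr_block = blocksize / 32;   // 3200 32-byte block hashes fit in one blocklist block
                long fileSize = 1048576;                 // a 1 MiB file

                // Ceiling division: ceil(1048576 / 102400) = 11 blocks
                long expectedblocks = (fileSize + blocksize - 1) / blocksize;
                // ceil(11 / 3200) = 1 blocklist hash
                long expectedblocklisthashes = (expectedblocks + hashes_pr_block - 1) / hashes_pr_block;
                // Single-block files store their hash inline, so no blocklist is needed:
                if (expectedblocks <= 1) expectedblocklisthashes = 0;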
Example #38
        internal static void VerifyParameters(LocalDatabase db, Options options, System.Data.IDbTransaction transaction = null)
        {
            var newDict = new Dictionary<string, string>();
            newDict.Add("blocksize", options.Blocksize.ToString());
            newDict.Add("blockhash", options.BlockHashAlgorithm);
            newDict.Add("filehash", options.FileHashAlgorithm);
            var opts = db.GetDbOptions(transaction);

            if (options.NoEncryption)
            {
                newDict.Add("passphrase", "no-encryption");
            }
            else
            {
                string salt;
                opts.TryGetValue("passphrase-salt", out salt);
                if (string.IsNullOrEmpty(salt))
                {
                    // Note: this salt is not generated by a crypto-grade PRNG
                    var buf = new byte[32];
                    new Random().NextBytes(buf);
                    //Add version so we can detect and change the algorithm
                    salt = "v1:" + Library.Utility.Utility.ByteArrayAsHexString(buf);
                }

                newDict["passphrase-salt"] = salt;

                // We avoid storing the passphrase directly,
                // instead we salt and rehash repeatedly
                newDict.Add("passphrase", Library.Utility.Utility.ByteArrayAsHexString(Library.Utility.Utility.RepeatedHashWithSalt(options.Passphrase, salt, 1200)));
            }

            var needsUpdate = false;
            foreach(var k in newDict)
                if (!opts.ContainsKey(k.Key))
                    needsUpdate = true;
                else if (opts[k.Key] != k.Value)
                {
                    if (k.Key == "passphrase")
                    {
                        if (!options.AllowPassphraseChange)
                        {
                            if (newDict[k.Key] == "no-encryption")
                                throw new Exception("Unsupported removal of passphrase");
                            else if (opts[k.Key] == "no-encryption")
                                throw new Exception("Unsupported addition of passphrase");
                            else
                                throw new Exception("Unsupported change of passphrase");
                        }
                    }
                    else
                        throw new Exception(string.Format("Unsupported change of parameter \"{0}\" from \"{1}\" to \"{2}\"", k.Key, opts[k.Key], k.Value));

                }

            //Extra sanity check
            if (db.GetBlocksLargerThan(options.Blocksize) > 0)
                throw new Exception("Unsupported block-size change detected");

            if (needsUpdate)
                db.SetDbOptions(newDict, transaction);
        }
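A sketch of the matching verification step for the fingerprint stored above: recompute the repeated salted hash and compare. The helper name and lookup flow are assumptions; RepeatedHashWithSalt and GetDbOptions are the calls used in the example.

        // Hypothetical counterpart to the stored passphrase fingerprint:
        internal static bool PassphraseMatches(LocalDatabase db, Options options, System.Data.IDbTransaction transaction = null)
        {
            var opts = db.GetDbOptions(transaction);
            string stored, salt;
            if (!opts.TryGetValue("passphrase", out stored) || !opts.TryGetValue("passphrase-salt", out salt))
                return false;

            var computed = Library.Utility.Utility.ByteArrayAsHexString(
                Library.Utility.Utility.RepeatedHashWithSalt(options.Passphrase, salt, 1200));
            return stored == computed;
        }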
Example #39
        public void WaitForComplete(LocalDatabase db, System.Data.IDbTransaction transaction)
        {
            m_db.FlushDbMessages(db, transaction);
            if (m_lastException != null)
                throw m_lastException;

            var item = new FileEntryItem(OperationType.Terminate, null);
            if (m_queue.Enqueue(item))
                item.WaitForComplete();

            m_db.FlushDbMessages(db, transaction);

            if (m_lastException != null)
                throw m_lastException;
        }
Example #40
 public LocalRestoreDatabase(LocalDatabase dbparent)
     : base(dbparent)
 {
 }
Example #41
 public DatabaseCollector(LocalDatabase database, IBackendWriter stats)
 {
     m_database = database;
     m_stats = stats;
     m_dbqueue = new List<IDbEntry>();
     if (m_database != null)
         m_callerThread = System.Threading.Thread.CurrentThread;
 }
示例#42
0
        /// <summary>
        /// Uploads the verification file.
        /// </summary>
        /// <param name="backendurl">The backend url</param>
        /// <param name="options">The options to use</param>
        /// <param name="result">The result writer</param>
        /// <param name="db">The attached database</param>
        /// <param name="transaction">An optional transaction object</param>
        public static void UploadVerificationFile(string backendurl, Options options, IBackendWriter result, LocalDatabase db, System.Data.IDbTransaction transaction)
        {
            using(var backend = new BackendManager(backendurl, options, result, db))
            using(var tempfile = new Library.Utility.TempFile())
            {
                var remotename = options.Prefix + "-verification.json";
                using(var stream = new System.IO.StreamWriter(tempfile, false, System.Text.Encoding.UTF8))
                    FilelistProcessor.CreateVerificationFile(db, stream);

                if (options.Dryrun)
                {
                    result.AddDryrunMessage(string.Format("Would upload verification file: {0}, size: {1}", remotename, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(tempfile).Length)));
                }
                else
                {
                    backend.PutUnencrypted(remotename, tempfile);
                    backend.WaitForComplete(db, transaction);
                }
            }
        }
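A hedged invocation sketch, assuming UploadVerificationFile lives on the same FilelistProcessor class that provides CreateVerificationFile:

        // Hypothetical call, e.g. at the end of a backup run
        FilelistProcessor.UploadVerificationFile(m_backendurl, m_options, result.BackendWriter, db, transaction);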
Example #43
 public LocalRestoreDatabase(LocalDatabase dbparent, long blocksize)
     : base(dbparent)
 {
     //TODO: Should read this from DB?
     m_blocksize = blocksize;
 }
Example #44
        /// <summary>
        /// Helper method that verifies uploaded volumes and updates their state in the database.
        /// Throws an error if there are issues with the remote storage
        /// </summary>
        /// <param name="backend">The backend instance to use</param>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        /// <param name="log">The log instance to use</param>
        /// <returns>The analysis of the remote file listing</returns>
        public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
        {
            var rawlist = backend.List();
            var lookup = new Dictionary<string, Volumes.IParsedVolume>();

            var remotelist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p != null select p).ToList();
            var unknownlist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p == null select n).ToList();
            var filesets = (from n in remotelist where n.FileType == RemoteVolumeType.Files orderby n.Time descending select n).ToList();

            log.KnownFileCount = remotelist.Count();
            log.KnownFileSize = remotelist.Select(x => x.File.Size).Sum();
            log.UnknownFileCount = unknownlist.Count();
            log.UnknownFileSize = unknownlist.Select(x => x.Size).Sum();
            log.BackupListCount = filesets.Count;
            log.LastBackupDate = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();

            if (backend is Library.Interface.IQuotaEnabledBackend)
            {
                log.TotalQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).TotalQuotaSpace;
                log.FreeQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).FreeQuotaSpace;
            }

            log.AssignedQuotaSpace = options.QuotaSize;

            foreach (var s in remotelist)
                if (s.Prefix == options.Prefix)
                    lookup[s.File.Name] = s;

            var missing = new List<RemoteVolumeEntry>();
            var locallist = database.GetRemoteVolumes();
            foreach (var i in locallist)
            {
                //Ignore those that are deleted
                if (i.State == RemoteVolumeState.Deleted)
                    continue;

                if (i.State == RemoteVolumeState.Temporary)
                {
                    log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
                    database.RemoveRemoteVolume(i.Name, null);
                }
                else if (i.State == RemoteVolumeState.Deleting && lookup.ContainsKey(i.Name))
                {
                    log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                    backend.Delete(i.Name, i.Size, true);
                    lookup.Remove(i.Name);
                }
                else
                {
                    Volumes.IParsedVolume r;
                    if (!lookup.TryGetValue(i.Name, out r))
                    {
                        // r is null here (the lookup failed), so only the state check is meaningful
                        if (i.State == RemoteVolumeState.Uploading || i.State == RemoteVolumeState.Deleting)
                        {
                            log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
                            database.RemoveRemoteVolume(i.Name, null);
                        }
                        else
                            missing.Add(i);
                    }
                    else if (i.State != RemoteVolumeState.Verified)
                    {
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash);
                    }

                    lookup.Remove(i.Name);
                }
            }

            return new RemoteAnalysisResult() { ParsedVolumes = remotelist, ExtraVolumes = lookup.Values, MissingVolumes = missing };
        }
Example #45
 protected override void InsertBatch(List <NonFictionBook> objectBatch)
 {
     LocalDatabase.AddNonFictionBooks(objectBatch);
 }
Example #46
        public ControlIncoming()
        {
            ActualDate = DateTime.Now;

            database = LocalDatabase.GetInstance();
        }
Example #47
 public LocalTestDatabase(LocalDatabase parent)
     : base(parent)
 {        
 }
Example #48
 protected override void UpdateBatch(List <NonFictionBook> objectBatch)
 {
     LocalDatabase.UpdateNonFictionBooks(objectBatch);
 }
Example #49
        /// <summary>
        /// Helper method that verifies uploaded volumes and updates their state in the database.
        /// Throws an error if there are issues with the remote storage
        /// </summary>
        /// <param name="backend">The backend instance to use</param>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        /// <param name="log">The log instance to use</param>
        /// <returns>The analysis of the remote file listing</returns>
        public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
        {
            var rawlist = backend.List();
            var lookup = new Dictionary<string, Volumes.IParsedVolume>();

            var remotelist = (from n in rawlist
                              let p = Volumes.VolumeBase.ParseFilename(n)
                              where p != null && p.Prefix == options.Prefix
                              select p).ToList();

            var otherlist = (from n in rawlist
                             let p = Volumes.VolumeBase.ParseFilename(n)
                             where p != null && p.Prefix != options.Prefix
                             select p).ToList();

            var unknownlist = (from n in rawlist
                               let p = Volumes.VolumeBase.ParseFilename(n)
                               where p == null
                               select n).ToList();

            var filesets = (from n in remotelist
                            where n.FileType == RemoteVolumeType.Files
                            orderby n.Time descending
                            select n).ToList();

            log.KnownFileCount = remotelist.Count();
            log.KnownFileSize = remotelist.Select(x => x.File.Size).Sum();
            log.UnknownFileCount = unknownlist.Count();
            log.UnknownFileSize = unknownlist.Select(x => x.Size).Sum();
            log.BackupListCount = filesets.Count;
            log.LastBackupDate = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();

            if (backend is Library.Interface.IQuotaEnabledBackend)
            {
                log.TotalQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).TotalQuotaSpace;
                log.FreeQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).FreeQuotaSpace;
            }

            log.AssignedQuotaSpace = options.QuotaSize;

            foreach(var s in remotelist)
                lookup[s.File.Name] = s;

            var missing = new List<RemoteVolumeEntry>();
            var missingHash = new List<Tuple<long, RemoteVolumeEntry>>();
            var cleanupRemovedRemoteVolumes = new HashSet<string>();

            foreach(var e in database.DuplicateRemoteVolumes())
            {
                if (e.Value == RemoteVolumeState.Uploading || e.Value == RemoteVolumeState.Temporary)
                    database.UnlinkRemoteVolume(e.Key, e.Value);
                else
                    throw new Exception(string.Format("The remote volume {0} appears in the database with state {1} and a deleted state, cannot continue", e.Key, e.Value.ToString()));
            }

            var locallist = database.GetRemoteVolumes();
            foreach(var i in locallist)
            {
                Volumes.IParsedVolume r;
                var remoteFound = lookup.TryGetValue(i.Name, out r);
                var correctSize = remoteFound && i.Size >= 0 && (i.Size == r.File.Size || r.File.Size < 0);

                lookup.Remove(i.Name);

                switch (i.State)
                {
                    case RemoteVolumeState.Deleted:
                        if (remoteFound)
                            log.AddMessage(string.Format("ignoring remote file listed as {0}: {1}", i.State, i.Name));

                        break;

                    case RemoteVolumeState.Temporary:
                    case RemoteVolumeState.Deleting:
                        if (remoteFound)
                        {
                            log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                            backend.Delete(i.Name, i.Size, true);
                        }
                        else
                        {
                            if (i.deleteGracePeriod > DateTime.UtcNow)
                            {
                                log.AddMessage(string.Format("keeping delete request for {0} until {1}", i.Name, i.deleteGracePeriod.ToLocalTime()));
                            }
                            else
                            {
                                log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
                                cleanupRemovedRemoteVolumes.Add(i.Name);
                            }
                        }
                        break;
                    case RemoteVolumeState.Uploading:
                        if (remoteFound && correctSize && r.File.Size >= 0)
                        {
                            log.AddMessage(string.Format("promoting uploaded complete file from {0} to {2}: {1}", i.State, i.Name, RemoteVolumeState.Uploaded));
                            database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Uploaded, i.Size, i.Hash);
                        }
                        else if (!remoteFound)
                        {
                            log.AddMessage(string.Format("scheduling missing file for deletion, currently listed as {0}: {1}", i.State, i.Name));
                            cleanupRemovedRemoteVolumes.Add(i.Name);
                            database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Deleting, i.Size, i.Hash, false, TimeSpan.FromHours(2), null);
                        }
                        else
                        {
                            log.AddMessage(string.Format("removing incomplete remote file listed as {0}: {1}", i.State, i.Name));
                            backend.Delete(i.Name, i.Size, true);
                        }
                        break;

                    case RemoteVolumeState.Uploaded:
                        if (!remoteFound)
                            missing.Add(i);
                        else if (correctSize)
                            database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash);
                        else
                            missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));

                        break;

                    case RemoteVolumeState.Verified:
                        if (!remoteFound)
                            missing.Add(i);
                        else if (!correctSize)
                            missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));

                        break;

                    default:
                        log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null);
                        break;
                }

                backend.FlushDbMessages();
            }

            // clean up removed volumes in the DB en bloc
            database.RemoveRemoteVolumes(cleanupRemovedRemoteVolumes, null);

            foreach(var i in missingHash)
                log.AddWarning(string.Format("remote file {1} is listed as {0} with size {2} but should be {3}, please verify the sha256 hash \"{4}\"", i.Item2.State, i.Item2.Name, i.Item1, i.Item2.Size, i.Item2.Hash), null);

            return new RemoteAnalysisResult() {
                ParsedVolumes = remotelist,
                OtherVolumes = otherlist,
                ExtraVolumes = lookup.Values,
                MissingVolumes = missing,
                VerificationRequiredVolumes = missingHash.Select(x => x.Item2)
            };
        }
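A sketch of how a caller might act on the returned RemoteAnalysisResult; the property names come from the return statement above, while the abort-on-mismatch policy is an assumption:

        var analysis = FilelistProcessor.RemoteListAnalysis(backend, options, database, log);

        // Hypothetical policy: any extra or missing volume aborts the run
        if (analysis.ExtraVolumes.Any() || analysis.MissingVolumes.Any())
            throw new Exception(string.Format(
                "Found {0} extra and {1} missing volume(s) on the remote store",
                analysis.ExtraVolumes.Count(), analysis.MissingVolumes.Count));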
Example #50
 protected override int? GetExistingObjectIdByLibgenId(int libgenId)
 {
     return(LocalDatabase.GetNonFictionBookIdByLibgenId(libgenId));
 }
Example #51
 /// <summary>
 /// Writes a verification file describing the known remote volumes to the given stream.
 /// </summary>
 /// <param name="db">The database instance</param>
 /// <param name="stream">The stream to write to</param>
 public static void CreateVerificationFile(LocalDatabase db, System.IO.StreamWriter stream)
 {
     var s = new Newtonsoft.Json.JsonSerializer();
     s.Serialize(stream, db.GetRemoteVolumes().Cast<IRemoteVolume>().ToArray());
 }
Example #52
 protected override void InsertBatch(List <SciMagArticle> objectBatch)
 {
     LocalDatabase.AddSciMagArticles(objectBatch);
 }
Example #53
        private void DoRun(LocalDatabase dbparent, Library.Utility.IFilter filter, RestoreResults result)
        {
            //In this case, we check that the remote storage fits with the database.
            //We can then query the database and find the blocks that we need to do the restore
            using(var database = new LocalRestoreDatabase(dbparent, m_options.Blocksize))
            using(var backend = new BackendManager(m_backendurl, m_options, result.BackendWriter, database))
            {
                database.SetResult(m_result);
                Utility.VerifyParameters(database, m_options);
	        	
                var blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
                var filehasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.FileHashAlgorithm);
                if (blockhasher == null)
                    throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.BlockHashAlgorithm));
                if (!blockhasher.CanReuseTransform)
                    throw new Exception(string.Format(Strings.Foresthash.InvalidCryptoSystem, m_options.BlockHashAlgorithm));

                if (filehasher == null)
                    throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.FileHashAlgorithm));
                if (!filehasher.CanReuseTransform)
                    throw new Exception(string.Format(Strings.Foresthash.InvalidCryptoSystem, m_options.FileHashAlgorithm));

                if (!m_options.NoBackendverification)
                {
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PreRestoreVerify);                
                    FilelistProcessor.VerifyRemoteList(backend, m_options, database, result.BackendWriter);
                }

                //Figure out what files are to be patched, and what blocks are needed
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_CreateFileList);
                using(new Logging.Timer("PrepareBlockList"))
                    PrepareBlockAndFileList(database, m_options, filter, result);

                //Make the entire output setup
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_CreateTargetFolders);
                using(new Logging.Timer("CreateDirectory"))
                    CreateDirectoryStructure(database, m_options, result);
                
                //If we are patching an existing target folder, do not touch stuff that is already updated
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_ScanForExistingFiles);
                using(new Logging.Timer("ScanForexistingTargetBlocks"))
                    ScanForExistingTargetBlocks(database, m_blockbuffer, blockhasher, filehasher, m_options, result);

                //Look for existing blocks in the original source files only
                using(new Logging.Timer("ScanForExistingSourceBlocksFast"))
#if DEBUG
                    if (!m_options.NoLocalBlocks && !string.IsNullOrEmpty(m_options.Restorepath))
#else
                    if (!string.IsNullOrEmpty(m_options.Restorepath))
#endif
                    {
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_ScanForLocalBlocks);
                        ScanForExistingSourceBlocksFast(database, m_options, m_blockbuffer, blockhasher, result);
                    }

                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                {
                    backend.WaitForComplete(database, null);
                    return;
                }

                // If other local files already have the blocks we want, we use them instead of downloading
                if (m_options.PatchWithLocalBlocks)
                {
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PatchWithLocalBlocks);
                    using(new Logging.Timer("PatchWithLocalBlocks"))
                        ScanForExistingSourceBlocks(database, m_options, m_blockbuffer, blockhasher, result);
                }

                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                {
                    backend.WaitForComplete(database, null);
                    return;
                }
                
                // Fill BLOCKS with remote sources
                var volumes = database.GetMissingVolumes().ToList();

                if (volumes.Count > 0)
                {
                    m_result.AddMessage(string.Format("{0} remote files are required to restore", volumes.Count));
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_DownloadingRemoteFiles);
                }

                var brokenFiles = new List<string>();
                foreach(var blockvolume in new AsyncDownloader(volumes, backend))
                    try
                    {
                        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                        {
                            backend.WaitForComplete(database, null);
                            return;
                        }

                        using(var tmpfile = blockvolume.TempFile)
                        using(var blocks = new BlockVolumeReader(GetCompressionModule(blockvolume.Name), tmpfile, m_options))
                            PatchWithBlocklist(database, blocks, m_options, result, m_blockbuffer);
                    }
                    catch (Exception ex)
                    {
                        brokenFiles.Add(blockvolume.Name);
                        result.AddError(string.Format("Failed to patch with remote file: \"{0}\", message: {1}", blockvolume.Name, ex.Message), ex);
                        if (ex is System.Threading.ThreadAbortException)
                            throw;
                    }
                
                // Reset the filehasher if it was used to verify existing files
                filehasher.Initialize();
					
                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    return;
                
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PostRestoreVerify);
                
                var fileErrors = 0L;
                // After all blocks in the files are restored, verify the file hash
                using(new Logging.Timer("RestoreVerification"))
                    foreach (var file in database.GetFilesToRestore())
                    {
                        try
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(database, null);
                                return;
                            }
                            
                            result.AddVerboseMessage("Testing restored file integrity: {0}", file.Path);
                            
                            string key;
                            long size;
                            using (var fs = m_systemIO.FileOpenRead(file.Path))
                            {
                                size = fs.Length;
                                key = Convert.ToBase64String(filehasher.ComputeHash(fs));
                            }
    
                            if (key != file.Hash)
                                throw new Exception(string.Format("Failed to restore file: \"{0}\". File hash is {1}, expected hash is {2}", file.Path, key, file.Hash));
                            result.FilesRestored++;
                            result.SizeOfRestoredFiles += size;
                        } 
                        catch (Exception ex)
                        {
                            fileErrors++;
                            result.AddWarning(ex.Message, ex);
                            if (ex is System.Threading.ThreadAbortException)
                                throw;
                        }
                    }
                    
                if (fileErrors > 0 && brokenFiles.Count > 0)
                    m_result.AddMessage(string.Format("Failed to restore {0} files, additionally the following files failed to download, which may be the cause:{1}", fileErrors, Environment.NewLine, string.Join(Environment.NewLine, brokenFiles)));

                // Drop the temp tables
                database.DropRestoreTable();
                backend.WaitForComplete(database, null);
            }
            
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_Complete);
            result.EndTime = DateTime.UtcNow;
        }
Example #54
        /// <summary>This will be called when _revsToInsert fills up:</summary>
        public void InsertRevisions(IList <IList <Object> > revs)
        {
            Log.I(Tag, this + " inserting " + revs.Count + " revisions...");
            //Log.v(Database.TAG, String.format("%s inserting %s", this, revs));

            revs.Sort(new RevisionComparer());

            if (LocalDatabase == null)
            {
                AsyncTaskFinished(revs.Count);
                return;
            }

            LocalDatabase.BeginTransaction();

            var success = false;

            try
            {
                foreach (var revAndHistory in revs)
                {
                    var rev          = (PulledRevision)revAndHistory[0];
                    var fakeSequence = rev.GetSequence();
                    var history      = (IList <String>)revAndHistory[1];

                    // Insert the revision:
                    try
                    {
                        LocalDatabase.ForceInsert(rev, history, RemoteUrl);
                    }
                    catch (CouchbaseLiteException e)
                    {
                        if (e.GetCBLStatus().GetCode() == StatusCode.Forbidden)
                        {
                            Log.I(Tag, this + ": Remote rev failed validation: " + rev);
                        }
                        else
                        {
                            Log.W(Tag, this + " failed to write " + rev + ": status=" + e.GetCBLStatus().GetCode());
                            LastError = new HttpResponseException(e.GetCBLStatus().GetHttpStatusCode());
                            continue;
                        }
                    }
                    pendingSequences.RemoveSequence(fakeSequence);
                }

                Log.W(Tag, this + " finished inserting " + revs.Count + " revisions");

                LastSequence = pendingSequences.GetCheckpointedValue();
                success      = true;
            }
            catch (SQLException e)
            {
                Log.E(Tag, this + ": Exception inserting revisions", e);
            }
            finally
            {
                LocalDatabase.EndTransaction(success);
                Log.D(Tag, this + "|" + Thread.CurrentThread() + ": insertRevisions() calling asyncTaskFinished()");
                AsyncTaskFinished(revs.Count);
            }
            CompletedChangesCount += revs.Count;
        }
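The Sort call above implies a comparer over the rev/history pairs; the original RevisionComparer is not included in this snippet, so the following is only a sketch of its likely shape, ordering by the pulled sequence number:

        // Hypothetical comparer: order rev/history pairs by their pulled
        // sequence so revisions are inserted in arrival order
        private sealed class RevisionComparer : IComparer<IList<Object>>
        {
            public int Compare(IList<Object> x, IList<Object> y)
            {
                var revX = (PulledRevision)x[0];
                var revY = (PulledRevision)y[0];
                return revX.GetSequence().CompareTo(revY.GetSequence());
            }
        }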
Example #55
        internal override void ProcessInbox(RevisionList inbox)
        {
            var lastInboxSequence = inbox[inbox.Count - 1].GetSequence();
            // Generate a set of doc/rev IDs in the JSON format that _revs_diff wants:
            // <http://wiki.apache.org/couchdb/HttpPostRevsDiff>
            var diffs = new Dictionary <String, IList <String> >();

            foreach (var rev in inbox)
            {
                var docID = rev.GetDocId();
                var revs  = diffs.Get(docID);
                if (revs == null)
                {
                    revs         = new AList <String>();
                    diffs[docID] = revs;
                }
                revs.AddItem(rev.GetRevId());
            }

            // Call _revs_diff on the target db:
            Log.D(Tag, this + "|" + Thread.CurrentThread() + ": processInbox() calling asyncTaskStarted()");
            Log.D(Tag, this + "|" + Thread.CurrentThread() + ": posting to /_revs_diff: " + diffs);

            AsyncTaskStarted();
            SendAsyncRequest(HttpMethod.Post, "/_revs_diff", diffs, (response, e) =>
            {
                try {
                    Log.D(Tag, this + "|" + Thread.CurrentThread() + ": /_revs_diff response: " + response);

                    var responseData = (JObject)response;
                    var results      = responseData.ToObject <IDictionary <string, object> >();

                    if (e != null)
                    {
                        LastError = e;
                        RevisionFailed();
                        //Stop ();
                    }
                    else
                    {
                        if (results.Count != 0)
                        {
                            // Go through the list of local changes again, selecting the ones the destination server
                            // said were missing and mapping them to a JSON dictionary in the form _bulk_docs wants:
                            var docsToSend = new AList <object> ();

                            foreach (var rev in inbox)
                            {
                                IDictionary <string, object> properties = null;
                                var resultDocData = (JObject)results.Get(rev.GetDocId());
                                var resultDoc     = resultDocData.ToObject <IDictionary <String, Object> >();
                                if (resultDoc != null)
                                {
                                    var revs = ((JArray)resultDoc.Get("missing")).Values <String>().ToList();
                                    if (revs != null && revs.Contains(rev.GetRevId()))
                                    {
                                        //remote server needs this revision
                                        // Get the revision's properties
                                        if (rev.IsDeleted())
                                        {
                                            properties = new Dictionary <string, object> ();
                                            properties.Put("_id", rev.GetDocId());
                                            properties.Put("_rev", rev.GetRevId());
                                            properties.Put("_deleted", true);
                                        }
                                        else
                                        {
                                            // OPT: Shouldn't include all attachment bodies, just ones that have changed
                                            var contentOptions = EnumSet.Of(TDContentOptions.TDIncludeAttachments, TDContentOptions.TDBigAttachmentsFollow);
                                            try {
                                                LocalDatabase.LoadRevisionBody(rev, contentOptions);
                                            } catch (CouchbaseLiteException e1) {
                                                Log.W(Tag, string.Format("%s Couldn't get local contents of %s", rev, this));
                                                RevisionFailed();
                                                continue;
                                            }
                                            properties = new Dictionary <String, Object> (rev.GetProperties());
                                        }
                                        if (properties.ContainsKey("_attachments"))
                                        {
                                            if (UploadMultipartRevision(rev))
                                            {
                                                continue;
                                            }
                                        }
                                        if (properties != null)
                                        {
                                            // Add the _revisions list:
                                            properties.Put("_revisions", LocalDatabase.GetRevisionHistoryDict(rev));
                                            //now add it to the docs to send
                                            docsToSend.AddItem(properties);
                                        }
                                    }
                                }
                            }
                            // Post the revisions to the destination. "new_edits":false means that the server should
                            // use the given _rev IDs instead of making up new ones.
                            var numDocsToSend = docsToSend.Count;
                            if (numDocsToSend > 0)
                            {
                                var bulkDocsBody = new Dictionary <String, Object> ();
                                bulkDocsBody.Put("docs", docsToSend);
                                bulkDocsBody.Put("new_edits", false);

                                Log.V(Tag, string.Format("%s: POSTing " + numDocsToSend + " revisions to _bulk_docs: %s", this, docsToSend));

                                ChangesCount += numDocsToSend;

                                Log.D(Tag, this + "|" + Thread.CurrentThread() + ": processInbox-before_bulk_docs() calling asyncTaskStarted()");

                                AsyncTaskStarted();
                                SendAsyncRequest(HttpMethod.Post, "/_bulk_docs", bulkDocsBody, (result, ex) => {
                                    try
                                    {
                                        if (ex != null)
                                        {
                                            LastError = ex;
                                            RevisionFailed();
                                        }
                                        else
                                        {
                                            Log.V(Tag, string.Format("%s: POSTed to _bulk_docs: %s", this, docsToSend));
                                            LastSequence = string.Format("{0}", lastInboxSequence);
                                        }
                                        CompletedChangesCount += numDocsToSend;
                                    }
                                    finally
                                    {
                                        AsyncTaskFinished(1);
                                    }
                                });
                            }
                        }
                        else
                        {
                            // If none of the revisions are new to the remote, just bump the lastSequence:
                            LastSequence = string.Format("{0}", lastInboxSequence);
                        }
                    }
                }
                finally
                {
                    Log.D(Tag, this + "|" + Thread.CurrentThread() + ": processInbox() calling asyncTaskFinished()");
                    AsyncTaskFinished(1);
                }
            });
        }
Example #56
        private bool UploadMultipartRevision(RevisionInternal revision)
        {
            MultipartFormDataContent multiPart = null;
            var revProps = revision.GetProperties();

            revProps.Put("_revisions", LocalDatabase.GetRevisionHistoryDict(revision));

            var attachments = (IDictionary <string, object>)revProps.Get("_attachments");

            foreach (var attachmentKey in attachments.Keys)
            {
                var attachment = (IDictionary <String, Object>)attachments.Get(attachmentKey);
                if (attachment.ContainsKey("follows"))
                {
                    if (multiPart == null)
                    {
                        multiPart = new MultipartFormDataContent();
                        try
                        {
                            var json        = Manager.GetObjectMapper().WriteValueAsString(revProps);
                            var utf8charset = Encoding.UTF8;
                            multiPart.Add(new StringContent(json, utf8charset, "application/json"), "param1");
                        }
                        catch (IOException e)
                        {
                            throw new ArgumentException("Not able to serialize revision properties into a multipart request content.", e);
                        }
                    }

                    var blobStore    = LocalDatabase.Attachments;
                    var base64Digest = (string)attachment.Get("digest");

                    var blobKey     = new BlobKey(base64Digest);
                    var inputStream = blobStore.BlobStreamForKey(blobKey);

                    if (inputStream == null)
                    {
                        Log.W(Tag, "Unable to find blob file for blobKey: " + blobKey + " - Skipping upload of multipart revision.");
                        multiPart = null;
                    }
                    else
                    {
                        string contentType = null;
                        if (attachment.ContainsKey("content_type"))
                        {
                            contentType = (string)attachment.Get("content_type");
                        }
                        else
                        {
                            if (attachment.ContainsKey("content-type"))
                            {
                                var message = string.Format("Found attachment that uses content-type"
                                                            + " field name instead of content_type (see couchbase-lite-android"
                                                            + " issue #80): " + attachment);
                                Log.W(Tag, message);
                            }
                        }

                        var content = new StreamContent(inputStream);
                        // content_type may be absent; only set the header when present,
                        // since MediaTypeHeaderValue rejects a null media type
                        if (contentType != null)
                            content.Headers.ContentType = new MediaTypeHeaderValue(contentType);

                        multiPart.Add(content, attachmentKey);
                    }
                }
            }

            if (multiPart == null)
            {
                return(false);
            }

            var path = string.Format("/{0}?new_edits=false", revision.GetDocId());

            // TODO: need to throttle these requests
            Log.D(Tag, "Uploadeding multipart request.  Revision: " + revision);
            Log.D(Tag, this + "|" + Thread.CurrentThread() + ": uploadMultipartRevision() calling asyncTaskStarted()");

            AsyncTaskStarted();

            SendAsyncMultipartRequest(HttpMethod.Put, path, multiPart, (result, e) => {
                try
                {
                    if (e != null)
                    {
                        Log.E(Tag, "Exception uploading multipart request", e);
                        LastError = e;
                        RevisionFailed();
                    }
                    else
                    {
                        Log.D(Tag, "Uploaded multipart request.  Result: " + result);
                    }
                }
                finally
                {
                    AsyncTaskFinished(1);
                }
            });
            return(true);
        }
Example #57
		public LocalDeleteDatabase(LocalDatabase db)
			: base(db)
		{
			InitializeCommands();
		}
Example #58
        private Trajet AddtrajetForTest()
        {
            Trajet test = new Trajet();

            test.m_sRefNumber     = "B";
            test.m_datetrajet     = DateTime.Now;
            test.m_sLocationStart = "Grenoble - Presqu'île";
            test.m_sLocationEnd   = "Plaine des Sports";
            test.m_sTimeStart     = "6:57";
            test.m_sTimeEnd       = "7:34";
            test.m_sTrainType     = "Tramway";
            test.m_sLocoType      = "Electrique";
            test.m_sWeight        = "56";
            test.m_lsarret        = new List <Arret>();
            test.m_lsarret.Add(new Arret("Grenoble - Presqu'île", "0", "", "", 0, "6:57", "", "", "", 45.204441, 5.701218, 0, true, "", "S", false));
            test.m_lsarret.Add(new Arret("Cité Internationale", "0", "", "", 0, "7:00", "", "", "", 45.195042, 5.711121, 0, true, "", "S", false));
            test.m_lsarret.Add(new Arret("Palais de Justice", "0", "", "", 0, "7:03", "", "", "", 45.191158, 5.711664, 0, true, "", "S", false));
            test.m_lsarret.Add(new Arret("Saint Bruno", "0", "", "", 0, "7:05", "", "", "", 45.190149, 5.715212, 0, true, "", "S", false));
            test.m_lsarret.Add(new Arret("Gare", "0", "", "0", 5, "7:07", "", "", "V2", 45.190149, 5.715212, 7, true, "", "S", false));
            test.m_lsarret.Add(new Arret("Alsace lorraine", "0", " ", "30", 0, "7:09", "", "", "V2", 45.189394, 5.720446, 2, false, "", "S", false));
            test.m_lsarret.Add(new Arret("Victor Hugo", "0", "", "70", 2, "7:11", "", "", "V2", 45.189799, 5.725152, 6, true, "", "S", false));
            test.m_lsarret.Add(new Arret("Maison du tourisme", "0", "", "70", 8, "7:12", "", "", "V2", 45.190195, 5.728310, 2, false, "", "S", false));
            test.m_lsarret.Add(new Arret("Sainte Claire les halles", "0", "A2", "30", 0, "7:14", "", "", "BV2", 45.191323, 5.730705, 2, true, "", "S", false));
            test.m_lsarret.Add(new Arret("Notre Dame Musée", "0", "", "0", 12, "7:15", "", "", "V2", 45.193565, 5.732215, 19, false, "", "S", false));
            test.m_lsarret.Add(new Arret("Île Verte", "0", "", "", 0, "7:16", "", "", "", 45.197286, 5.736712, 0, true, "", "S", false));
            test.m_lsarret.Add(new Arret("La Tronche Hôpital", "0", "", "", 0, "7:17", "", "", "", 45.200633, 5.741139, 0, true, "", "S", false));
            test.m_lsarret.Add(new Arret("Michallon", "0", "", "", 0, "7:19", "", "", "", 45.200280, 5.745447, 0, true, "", "S", false));
            test.m_lsarret.Add(new Arret("Grand Sablon", "0", "", "", 0, "7:21", "", "", "", 45.198984, 5.749868, 0, true, "", "S", false));
            test.m_lsarret.Add(new Arret("Les Taillées-Universités", "0", "", "", 0, "7:23", "", "", "", 45.192195, 5.757836, 0, true, "", "S", false));
            test.m_lsarret.Add(new Arret("Gabriel Fauré", "0", "", "", 0, "7:25", "", "", "", 45.192288, 5.764456, 0, true, "", "S", false));
            test.m_lsarret.Add(new Arret("Bibliothèques Universitaires", "0", "", "", 0, "7:27", "", "", "", 45.192298, 5.770843, 0, true, "", "S", false));
            test.m_lsarret.Add(new Arret("Condillac-Universités", "0", "", "", 0, "7:29", "", "", "", 45.189128, 5.774377, 0, true, "", "S", false));
            test.m_lsarret.Add(new Arret("Mayencin Champ Roman", "0", "", "", 0, "7:31", "", "", "", 45.184743, 5.779055, 0, true, "", "S", false));
            test.m_lsarret.Add(new Arret("Gières Gare-Universités", "0", "", "", 0, "7:33", "", "", "", 45.185393, 5.785503, 0, true, "", "S", false));
            test.m_lsarret.Add(new Arret("Plaine des Sports", "0", "", "", 0, "7:34", "", "", "", 45.187521, 5.784426, 0, true, "", "S", false));

            //Retrieve PDF files from assets
            AssetManager  assetManager = Assets;
            List <string> lsfiles      = new List <string>();

            try
            {
                lsfiles = new List <string>(assetManager.List("PDF_forDemo"));
            }
            catch (Java.IO.IOException)
            { }
            catch (System.IO.IOException)
            { }

            //Save PDF files to local storage, in the Downloads folder
            string outDir = Android.OS.Environment.GetExternalStoragePublicDirectory(Android.OS.Environment.DirectoryDownloads).AbsolutePath + "/pdf_Trajet_Tram";

            try
            {
                if (!AskForPermissions.askForWriteExternalStoragePermission(this, 48))
                {
                    return(null);
                }

                if (!System.IO.Directory.Exists(outDir))
                {
                    System.IO.Directory.CreateDirectory(outDir);
                }
                foreach (string sFileName in lsfiles)
                {
                    Stream     input  = null;
                    FileStream output = null;
                    try
                    {
                        input = assetManager.Open("PDF_forDemo/" + sFileName);

                        if (System.IO.File.Exists(outDir + "/" + sFileName))
                        {
                            System.IO.File.Delete(outDir + "/" + sFileName);
                        }

                        output = System.IO.File.Create(outDir + "/" + sFileName);

                        input.CopyTo(output);
                        output.Close();
                    }
                    catch (Java.IO.IOException)
                    { }
                    catch (System.IO.IOException)
                    { }
                }
            }
            catch (Exception e)
            {
                MobileCenter_Helper.ReportError(new FileAccessManager(), e, GetType().Name, MethodBase.GetCurrentMethod().Name);
                DynamicUIBuild_Utils.ShowSnackBar_WithOKButtonToClose(this, m_frameLayout, Resource.String.snackbar_errorHappened);
            }

            LocalDatabase.Get().Addtrajet(new FileAccessManager(), test);
            foreach (Arret p in test.m_lsarret)
            {
                p.m_iLocalIDtrajet = test.m_iLocalID;
                LocalDatabase.Get().AddArret(new FileAccessManager(), p);
            }

            //Refresh list if needed
            m_fragmentListtrajets?.RefreshtrajetList();

            return(test);
        }