/// <summary>
/// Creates an edit-log output stream that forwards serialized edits to a
/// backup node over an RPC proxy.
/// </summary>
/// <exception cref="System.IO.IOException"/>
internal EditLogBackupOutputStream(NamenodeRegistration bnReg, JournalInfo journalInfo)
    : base()
{
    this.bnRegistration = bnReg;   // backup node registration
    this.journalInfo = journalInfo; // active name-node journal identity
    // Resolve the backup node's RPC endpoint from its registration string.
    IPEndPoint backupAddress = NetUtils.CreateSocketAddr(bnRegistration.GetAddress());
    try
    {
        // Non-HA proxy: the backup node is a single, fixed endpoint.
        this.backupNode = NameNodeProxies.CreateNonHAProxy<JournalProtocol>(
            new HdfsConfiguration(), backupAddress,
            UserGroupInformation.GetCurrentUser(), true).GetProxy();
    }
    catch (IOException e)
    {
        Storage.Log.Error("Error connecting to: " + backupAddress, e);
        throw;
    }
    // Double buffer accumulates edits locally; @out holds the serialized
    // bytes sent to the backup node.
    this.doubleBuf = new EditsDoubleBuffer(DefaultBufferSize);
    this.@out = new DataOutputBuffer(DefaultBufferSize);
}
/// <summary>
/// Journal manager that targets a backup node; the journal identity is
/// derived from the active name-node's registration.
/// </summary>
internal BackupJournalManager(NamenodeRegistration bnReg, NamenodeRegistration nnReg)
{
    this.bnReg = bnReg;
    journalInfo = new JournalInfo(
        nnReg.GetLayoutVersion(),
        nnReg.GetClusterID(),
        nnReg.GetNamespaceID());
}
/// <summary>
/// Receives a batch of serialized edit records from the active name-node and
/// applies them to the backup image.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public override void Journal(JournalInfo journalInfo, long epoch, long firstTxId,
    int numTxns, byte[] records)
{
    // Reject the call if this node cannot currently serve journal operations.
    namesystem.CheckOperation(NameNode.OperationCategory.Journal);
    // Verify the sender belongs to this namespace/cluster before accepting edits.
    VerifyJournalRequest(journalInfo);
    // NOTE(review): `epoch` is not consulted here — presumably validated
    // elsewhere or unused by the backup node; confirm against the protocol.
    GetBNImage().Journal(firstTxId, numTxns, records);
}
/////////////////////////////////////////////////////
// BackupNodeProtocol implementation for backup node.
/////////////////////////////////////////////////////
/// <summary>
/// Notifies the backup node that the active name-node started a new log
/// segment beginning at <paramref name="txid"/>.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public override void StartLogSegment(JournalInfo journalInfo, long epoch, long txid
    )
{
    // Reject the call if this node cannot currently serve journal operations.
    namesystem.CheckOperation(NameNode.OperationCategory.Journal);
    // Verify the sender belongs to this namespace/cluster.
    VerifyJournalRequest(journalInfo);
    // NOTE(review): `epoch` is not consulted here — confirm against the protocol.
    GetBNImage().NamenodeStartedLogSegment(txid);
}
/// <summary>
/// Builds per-bundle journal rows (header id, total, date, count, fund name)
/// for recorded contributions to <paramref name="FundId"/> between
/// <paramref name="dt1"/> and <paramref name="dt2"/>, and populates
/// <c>JournalTotal</c> with the aggregate across all rows.
/// </summary>
/// <remarks>
/// NOTE(review): this is a LINQ-to-SQL query; `q` is enumerated four times
/// (two Sums, one FirstOrDefault, the final ordering) — each enumeration is a
/// separate database round-trip. Also, the `where` clause uses
/// `BundleDetails.First()` while the group key uses `FirstOrDefault()` —
/// presumably equivalent after SQL translation, but confirm.
/// </remarks>
public IEnumerable <JournalInfo> JournalDetails(DateTime dt1, DateTime dt2, int FundId)
{
    var q = from c in DbUtil.Db.Contributions
            where dt1 <= c.ContributionDate.Value.Date
            where c.ContributionDate.Value.Date <= dt2
            where c.ContributionStatusId == ContributionStatusCode.Recorded
            // Exclude returned/reversed contributions and pledges.
            where !ContributionTypeCode.ReturnedReversedTypes.Contains(c.ContributionTypeId)
            where c.ContributionTypeId != ContributionTypeCode.Pledge
            where c.FundId == FundId
            // Only bundles still in the "open" status (0).
            where c.BundleDetails.First().BundleHeader.BundleStatusId == 0
            group c by new { c.BundleDetails.FirstOrDefault().BundleHeader.BundleHeaderId, c.ContributionDate } into g
            select new JournalInfo
            {
                HeaderId = g.Key.BundleHeaderId,
                Total = g.Sum(t => t.ContributionAmount),
                Date = g.Key.ContributionDate.Value,
                Count = g.Count(),
                FundName = g.First().ContributionFund.FundName,
            };
    // Side effect: grand totals across all rows, stored on the instance.
    JournalTotal = new JournalInfo
    {
        Count = q.Sum(t => t.Count),
        Total = q.Sum(t => t.Total),
        FundName = q.Select(ff => ff.FundName).FirstOrDefault(),
    };
    return(q.OrderBy(j => j.HeaderId).ThenBy(j => j.Date));
}
/// <summary>
/// Creates the exception with a caller-supplied message, appending the
/// journal state (current/last-synced journal, last-synced transaction id,
/// flags) for diagnostics.
/// </summary>
public InvalidJournalException(string message, JournalInfo journalInfo)
    : base($"{message}. Journal details: " +
        $"{nameof(journalInfo.CurrentJournal)} - {journalInfo.CurrentJournal}, " +
        $"{nameof(journalInfo.LastSyncedJournal)} - {journalInfo.LastSyncedJournal}, " +
        $"{nameof(journalInfo.LastSyncedTransactionId)} - {journalInfo.LastSyncedTransactionId}, " +
        $"{nameof(journalInfo.Flags)} - {journalInfo.Flags}")
{
}
/// <summary>
/// Creates the exception for a journal that could not be found by number,
/// appending the journal state for diagnostics.
/// </summary>
public InvalidJournalException(long number, JournalInfo journalInfo)
    : base($"No such journal '{number}'. Journal details: " +
        $"{nameof(journalInfo.CurrentJournal)} - {journalInfo.CurrentJournal}, " +
        $"{nameof(journalInfo.LastSyncedJournal)} - {journalInfo.LastSyncedJournal}, " +
        // Fixed: the ", " separator after LastSyncedTransactionId was missing,
        // running it together with the Flags segment (cf. the sibling ctor).
        $"{nameof(journalInfo.LastSyncedTransactionId)} - {journalInfo.LastSyncedTransactionId}, " +
        $"{nameof(journalInfo.Flags)} - {journalInfo.Flags}")
{
    Number = number;
}
/// <summary>
/// Translates StartLogSegment into a protobuf RPC call on the remote journal.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public virtual void StartLogSegment(JournalInfo journalInfo, long epoch, long txid)
{
    // Assemble the protobuf request describing the segment to start.
    var req = (JournalProtocolProtos.StartLogSegmentRequestProto)
        JournalProtocolProtos.StartLogSegmentRequestProto.NewBuilder()
            .SetJournalInfo(PBHelper.Convert(journalInfo))
            .SetEpoch(epoch)
            .SetTxid(txid)
            .Build();
    try
    {
        rpcProxy.StartLogSegment(NullController, req);
    }
    catch (ServiceException e)
    {
        // Unwrap the RPC-layer wrapper into the remote IOException.
        throw ProtobufHelper.GetRemoteException(e);
    }
}
/// <summary>
/// Translates a Journal edit batch into a protobuf RPC call on the remote journal.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public virtual void Journal(JournalInfo journalInfo, long epoch, long firstTxnId,
    int numTxns, byte[] records)
{
    // Assemble the protobuf request carrying the serialized edit records.
    var req = (JournalProtocolProtos.JournalRequestProto)
        JournalProtocolProtos.JournalRequestProto.NewBuilder()
            .SetJournalInfo(PBHelper.Convert(journalInfo))
            .SetEpoch(epoch)
            .SetFirstTxnId(firstTxnId)
            .SetNumTxns(numTxns)
            .SetRecords(PBHelper.GetByteString(records))
            .Build();
    try
    {
        rpcProxy.Journal(NullController, req);
    }
    catch (ServiceException e)
    {
        // Unwrap the RPC-layer wrapper into the remote IOException.
        throw ProtobufHelper.GetRemoteException(e);
    }
}
/// <summary>
/// Translates a fence request into a protobuf RPC call and unpacks the response.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public virtual FenceResponse Fence(JournalInfo journalInfo, long epoch, string fencerInfo)
{
    // Assemble the protobuf fence request.
    var req = (JournalProtocolProtos.FenceRequestProto)
        JournalProtocolProtos.FenceRequestProto.NewBuilder()
            .SetEpoch(epoch)
            .SetJournalInfo(PBHelper.Convert(journalInfo))
            .Build();
    try
    {
        JournalProtocolProtos.FenceResponseProto resp =
            rpcProxy.Fence(NullController, req);
        // Repackage the protobuf response into the domain type.
        return new FenceResponse(
            resp.GetPreviousEpoch(),
            resp.GetLastTransactionId(),
            resp.GetInSync());
    }
    catch (ServiceException e)
    {
        // Unwrap the RPC-layer wrapper into the remote IOException.
        throw ProtobufHelper.GetRemoteException(e);
    }
}
/// <summary>Verifies a journal request</summary>
/// <remarks>
/// Checks the caller's layout version, namespace id and cluster id against
/// this namesystem; logs and throws <c>UnregisteredNodeException</c> on any
/// mismatch.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
private void VerifyJournalRequest(JournalInfo journalInfo)
{
    VerifyLayoutVersion(journalInfo.GetLayoutVersion());
    string errorMsg = null;
    int expectedNamespaceID = namesystem.GetNamespaceInfo().GetNamespaceID();
    if (journalInfo.GetNamespaceId() != expectedNamespaceID)
    {
        errorMsg = "Invalid namespaceID in journal request - expected " + expectedNamespaceID
            + " actual " + journalInfo.GetNamespaceId();
        Log.Warn(errorMsg);
        throw new UnregisteredNodeException(journalInfo);
    }
    if (!journalInfo.GetClusterId().Equals(namesystem.GetClusterId()))
    {
        // Fixed: "expected" is this namesystem's cluster id and "actual" is the
        // caller's — the original message had the two values swapped relative
        // to the namespaceID branch above.
        errorMsg = "Invalid clusterId in journal request - expected " + namesystem.GetClusterId()
            + " actual " + journalInfo.GetClusterId();
        Log.Warn(errorMsg);
        throw new UnregisteredNodeException(journalInfo);
    }
}
/// <summary>
/// Writes a full backup of <paramref name="env"/> to a zip archive at
/// <paramref name="backupPath"/>: storage headers, the data file, and every
/// journal file still referenced by the header.
/// </summary>
/// <remarks>
/// A ReadWrite transaction is held (but never committed) while the snapshot
/// is taken so headers and the journal list cannot change underneath us; a
/// Read transaction is kept open for the whole copy to pin the snapshot view.
/// </remarks>
public void ToFile(StorageEnvironment env, string backupPath,
    CompressionLevel compression = CompressionLevel.Optimal,
    Action<string> infoNotify = null)
{
    infoNotify = infoNotify ?? (s => { });
    var dataPager = env.Options.DataPager;
    var copier = new DataCopier(AbstractPager.PageSize * 16);
    Transaction txr = null;
    try
    {
        infoNotify("Voron copy headers");
        using (var file = new FileStream(backupPath, FileMode.Create))
        using (var package = new ZipArchive(file, ZipArchiveMode.Create))
        {
            long allocatedPages;
            ImmutableAppendOnlyList<JournalFile> files; // thread safety copy
            var usedJournals = new List<JournalFile>();
            long lastWrittenLogPage = -1;
            long lastWrittenLogFile = -1;
            using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) // so we can snapshot the headers safely
            {
                txr = env.NewTransaction(TransactionFlags.Read); // now have snapshot view
                allocatedPages = dataPager.NumberOfAllocatedPages;
                Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);
                VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options);
                // journal files snapshot
                files = env.Journal.Files;
                JournalInfo journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);
                // Walk every journal number the header says is still needed.
                for (var journalNum = journalInfo.CurrentJournal - journalInfo.JournalFilesCount + 1;
                    journalNum <= journalInfo.CurrentJournal; journalNum++)
                {
                    // first check journal files currently being in use
                    var journalFile = files.FirstOrDefault(x => x.Number == journalNum);
                    if (journalFile == null)
                    {
                        // Not in memory any more — reopen it from disk.
                        long journalSize;
                        using (var pager = env.Options.OpenJournalPager(journalNum))
                        {
                            journalSize = Utils.NearestPowerOfTwo(pager.NumberOfAllocatedPages * AbstractPager.PageSize);
                        }
                        journalFile = new JournalFile(env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                    }
                    // Pin each journal so it cannot be deleted while we copy.
                    journalFile.AddRef();
                    usedJournals.Add(journalFile);
                }
                if (env.Journal.CurrentFile != null)
                {
                    // Remember how far the active journal has been written so we
                    // only copy pages that existed at snapshot time.
                    lastWrittenLogFile = env.Journal.CurrentFile.Number;
                    lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition - 1;
                }
                // txw.Commit(); intentionally not committing — the write tx
                // exists only to block concurrent header changes.
            }
            // data file backup
            var dataPart = package.CreateEntry(Constants.DatabaseFilename, compression);
            Debug.Assert(dataPart != null);
            if (allocatedPages > 0) // false only when the data file is still empty at backup start
            {
                using (var dataStream = dataPart.Open())
                {
                    // now can copy everything else
                    var firstDataPage = dataPager.Read(0);
                    copier.ToStream(firstDataPage.Base, AbstractPager.PageSize * allocatedPages, dataStream);
                }
            }
            try
            {
                foreach (var journalFile in usedJournals)
                {
                    var journalPart = package.CreateEntry(StorageEnvironmentOptions.JournalName(journalFile.Number), compression);
                    Debug.Assert(journalPart != null);
                    var pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages;
                    if (journalFile.Number == lastWrittenLogFile)
                    {
                        // The active journal: copy only up to the snapshot position.
                        pagesToCopy = lastWrittenLogPage + 1;
                    }
                    using (var stream = journalPart.Open())
                    {
                        copier.ToStream(journalFile, 0, pagesToCopy, stream);
                        infoNotify(string.Format("Voron copy journal file {0} ", journalFile));
                    }
                }
            }
            finally
            {
                // Always drop the refs taken above, even if a copy failed.
                foreach (var journalFile in usedJournals)
                {
                    journalFile.Release();
                }
            }
        }
    }
    finally
    {
        if (txr != null)
        {
            txr.Dispose();
        }
    }
    infoNotify(string.Format("Voron backup db finished"));
}
/// <summary>
/// Returns a ref-counted <c>JournalFile</c> for <paramref name="journalNum"/>:
/// either the in-memory instance the environment is still using, or one
/// reopened from disk. The caller owns the reference and must Release it.
/// </summary>
internal static JournalFile GetJournalFile(StorageEnvironment env, long journalNum,
    IncrementalBackupInfo backupInfo, JournalInfo journalInfo)
{
    // first check journal files currently being in use
    var inUse = env.Journal.Files.FirstOrDefault(x => x.Number == journalNum);
    if (inUse != null)
    {
        inUse.AddRef();
        return inUse;
    }
    try
    {
        using (var pager = env.Options.OpenJournalPager(journalNum, journalInfo))
        {
            long size = Bits.PowerOf2(pager.NumberOfAllocatedPages * Constants.Storage.PageSize);
            var reopened = new JournalFile(env, env.Options.CreateJournalWriter(journalNum, size), journalNum);
            reopened.AddRef();
            return reopened;
        }
    }
    catch (InvalidJournalException e)
    {
        // Journal 0 missing on the very first incremental backup means the
        // storage was never created with incremental backups enabled.
        if (backupInfo.LastBackedUpJournal == -1 && journalNum == 0)
        {
            throw new InvalidOperationException("The first incremental backup creation failed because the first journal file " +
                StorageEnvironmentOptions.JournalName(journalNum) + " was not found. " +
                "Did you turn on the incremental backup feature after initializing the storage? " +
                "In order to create backups incrementally the storage must be created with IncrementalBackupEnabled option set to 'true'.", e);
        }
        throw;
    }
}
/// <summary>
/// Copies a full backup of <paramref name="env"/> (headers, data file, and all
/// journals referenced by the header) into <paramref name="package"/> under
/// <paramref name="basePath"/>.
/// </summary>
/// <remarks>
/// A ReadWrite transaction is held (but never committed) while snapshotting so
/// headers and the journal list stay stable; a Read transaction is kept open
/// for the whole copy to pin the snapshot view.
/// </remarks>
private static void Backup(
    StorageEnvironment env, CompressionLevel compression, Action<string> infoNotify,
    Action backupStarted, AbstractPager dataPager, ZipArchive package, string basePath,
    DataCopier copier)
{
    var usedJournals = new List<JournalFile>();
    long lastWrittenLogPage = -1;
    long lastWrittenLogFile = -1;
    LowLevelTransaction txr = null;
    try
    {
        long allocatedPages;
        var writePesistentContext = new TransactionPersistentContext(true);
        var readPesistentContext = new TransactionPersistentContext(true);
        using (var txw = env.NewLowLevelTransaction(writePesistentContext, TransactionFlags.ReadWrite)) // so we can snapshot the headers safely
        {
            txr = env.NewLowLevelTransaction(readPesistentContext, TransactionFlags.Read); // now have snapshot view
            allocatedPages = dataPager.NumberOfAllocatedPages;
            Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);
            infoNotify("Voron copy headers for " + basePath);
            VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options, basePath);
            // journal files snapshot
            var files = env.Journal.Files; // thread safety copy
            JournalInfo journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);
            // Walk every journal number the header says is still needed.
            for (var journalNum = journalInfo.CurrentJournal - journalInfo.JournalFilesCount + 1;
                journalNum <= journalInfo.CurrentJournal; journalNum++)
            {
                // first check journal files currently being in use
                var journalFile = files.FirstOrDefault(x => x.Number == journalNum);
                if (journalFile == null)
                {
                    // Not in memory any more — reopen it from disk.
                    long journalSize;
                    using (var pager = env.Options.OpenJournalPager(journalNum))
                    {
                        journalSize = Bits.NextPowerOf2(pager.NumberOfAllocatedPages * env.Options.PageSize);
                    }
                    journalFile = new JournalFile(env, env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                }
                // Pin each journal so it cannot be deleted while we copy.
                journalFile.AddRef();
                usedJournals.Add(journalFile);
            }
            if (env.Journal.CurrentFile != null)
            {
                // Remember how far the active journal has been written so we
                // only copy pages that existed at snapshot time.
                lastWrittenLogFile = env.Journal.CurrentFile.Number;
                lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition - 1;
            }
            // txw.Commit(); intentionally not committing — the write tx exists
            // only to block concurrent header changes.
        }
        backupStarted?.Invoke();
        // data file backup
        var dataPart = package.CreateEntry(Path.Combine(basePath, Constants.DatabaseFilename), compression);
        Debug.Assert(dataPart != null);
        if (allocatedPages > 0) // false only when the data file is still empty at backup start
        {
            using (var dataStream = dataPart.Open())
            {
                // now can copy everything else
                copier.ToStream(dataPager, 0, allocatedPages, dataStream);
            }
        }
        try
        {
            foreach (JournalFile journalFile in usedJournals)
            {
                var entryName = Path.Combine(basePath, StorageEnvironmentOptions.JournalName(journalFile.Number));
                var journalPart = package.CreateEntry(entryName, compression);
                Debug.Assert(journalPart != null);
                long pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages;
                if (journalFile.Number == lastWrittenLogFile)
                {
                    // The active journal: copy only up to the snapshot position.
                    pagesToCopy = lastWrittenLogPage + 1;
                }
                using (var stream = journalPart.Open())
                {
                    copier.ToStream(env, journalFile, 0, pagesToCopy, stream);
                    infoNotify(string.Format("Voron copy journal file {0}", entryName));
                }
            }
        }
        finally
        {
            // Always drop the refs taken above, even if a copy failed.
            foreach (var journalFile in usedJournals)
            {
                journalFile.Release();
            }
        }
    }
    finally
    {
        txr?.Dispose();
    }
}
/// <summary>
/// Builds per-bundle journal rows (header id, total, date, count, fund name)
/// for recorded contributions to <paramref name="FundId"/> between
/// <paramref name="dt1"/> and <paramref name="dt2"/>, and populates
/// <c>JournalTotal</c> with the aggregate across all rows.
/// </summary>
/// <remarks>
/// NOTE(review): this is a LINQ-to-SQL query; `q` is enumerated four times
/// (two Sums, one FirstOrDefault, the final ordering) — each enumeration is a
/// separate database round-trip. Also, the `where` clause uses
/// `BundleDetails.First()` while the group key uses `FirstOrDefault()` —
/// presumably equivalent after SQL translation, but confirm.
/// </remarks>
public IEnumerable<JournalInfo> JournalDetails(DateTime dt1, DateTime dt2, int FundId)
{
    var q = from c in DbUtil.Db.Contributions
            where dt1 <= c.ContributionDate.Value.Date
            where c.ContributionDate.Value.Date <= dt2
            where c.ContributionStatusId == ContributionStatusCode.Recorded
            // Exclude returned/reversed contributions and pledges.
            where !ContributionTypeCode.ReturnedReversedTypes.Contains(c.ContributionTypeId)
            where c.ContributionTypeId != ContributionTypeCode.Pledge
            where c.FundId == FundId
            // Only bundles still in the "open" status (0).
            where c.BundleDetails.First().BundleHeader.BundleStatusId == 0
            group c by new { c.BundleDetails.FirstOrDefault().BundleHeader.BundleHeaderId, c.ContributionDate } into g
            select new JournalInfo
            {
                HeaderId = g.Key.BundleHeaderId,
                Total = g.Sum(t => t.ContributionAmount),
                Date = g.Key.ContributionDate.Value,
                Count = g.Count(),
                FundName = g.First().ContributionFund.FundName,
            };
    // Side effect: grand totals across all rows, stored on the instance.
    JournalTotal = new JournalInfo
    {
        Count = q.Sum(t => t.Count),
        Total = q.Sum(t => t.Total),
        FundName = q.Select(ff => ff.FundName).FirstOrDefault(),
    };
    return q.OrderBy(j => j.HeaderId).ThenBy(j => j.Date);
}
/// <summary>
/// Copies the journal-name DAO fields (id, name, unique key) onto the
/// <c>JournalInfo</c> DTO.
/// </summary>
internal static void Fill(JournalInfo dest, DB.JournalNameDAO source)
{
    dest.UniqueKey = source.UniqueKey;
    dest.Name = source.Name;
    dest.IdJournal = source.IdJournal;
}
/// <summary>
/// Copies a full backup of <paramref name="env"/> (headers, data file, and all
/// journals referenced by the header) into <paramref name="package"/> under
/// <paramref name="basePath"/>. On success, records the last backed-up journal
/// for incremental backups and marks fully-synced, superseded journals for
/// deletion.
/// </summary>
/// <remarks>
/// A ReadWrite transaction is held (but never committed) while snapshotting so
/// headers and the journal list stay stable; a Read transaction is kept open
/// for the whole copy to pin the snapshot view.
/// </remarks>
private static void Backup(
    StorageEnvironment env, CompressionLevel compression, Action<string> infoNotify,
    Action backupStarted, AbstractPager dataPager, ZipArchive package, string basePath,
    DataCopier copier)
{
    var usedJournals = new List<JournalFile>();
    long lastWrittenLogPage = -1;
    long lastWrittenLogFile = -1;
    LowLevelTransaction txr = null;
    var backupSuccess = false;
    try
    {
        long allocatedPages;
        var writePesistentContext = new TransactionPersistentContext(true);
        var readPesistentContext = new TransactionPersistentContext(true);
        using (var txw = env.NewLowLevelTransaction(writePesistentContext, TransactionFlags.ReadWrite)) // so we can snapshot the headers safely
        {
            txr = env.NewLowLevelTransaction(readPesistentContext, TransactionFlags.Read); // now have snapshot view
            allocatedPages = dataPager.NumberOfAllocatedPages;
            Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);
            infoNotify("Voron copy headers for " + basePath);
            VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options, basePath);
            // journal files snapshot
            var files = env.Journal.Files; // thread safety copy
            JournalInfo journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);
            // Walk every journal number the header says is still needed.
            for (var journalNum = journalInfo.CurrentJournal - journalInfo.JournalFilesCount + 1;
                journalNum <= journalInfo.CurrentJournal; journalNum++)
            {
                // first check journal files currently being in use
                var journalFile = files.FirstOrDefault(x => x.Number == journalNum);
                if (journalFile == null)
                {
                    // Not in memory any more — reopen it from disk.
                    long journalSize;
                    using (var pager = env.Options.OpenJournalPager(journalNum))
                    {
                        journalSize = Bits.NextPowerOf2(pager.NumberOfAllocatedPages * Constants.Storage.PageSize);
                    }
                    journalFile = new JournalFile(env, env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                }
                // Pin each journal so it cannot be deleted while we copy.
                journalFile.AddRef();
                usedJournals.Add(journalFile);
            }
            if (env.Journal.CurrentFile != null)
            {
                // Remember how far the active journal has been written (in 4KB
                // units) so we only copy data that existed at snapshot time.
                lastWrittenLogFile = env.Journal.CurrentFile.Number;
                lastWrittenLogPage = env.Journal.CurrentFile.WritePosIn4KbPosition - 1;
            }
            // txw.Commit(); intentionally not committing — the write tx exists
            // only to block concurrent header changes.
        }
        backupStarted?.Invoke();
        // data file backup
        var dataPart = package.CreateEntry(Path.Combine(basePath, Constants.DatabaseFilename), compression);
        Debug.Assert(dataPart != null);
        if (allocatedPages > 0) // false only when the data file is still empty at backup start
        {
            using (var dataStream = dataPart.Open())
            {
                // now can copy everything else
                copier.ToStream(dataPager, 0, allocatedPages, dataStream);
            }
        }
        try
        {
            long lastBackedupJournal = 0;
            foreach (var journalFile in usedJournals)
            {
                var entryName = StorageEnvironmentOptions.JournalName(journalFile.Number);
                var journalPart = package.CreateEntry(Path.Combine(basePath, entryName), compression);
                Debug.Assert(journalPart != null);
                long pagesToCopy = journalFile.JournalWriter.NumberOfAllocated4Kb;
                if (journalFile.Number == lastWrittenLogFile)
                {
                    // The active journal: copy only up to the snapshot position.
                    pagesToCopy = lastWrittenLogPage + 1;
                }
                using (var stream = journalPart.Open())
                {
                    copier.ToStream(env, journalFile, 0, pagesToCopy, stream);
                    infoNotify(string.Format("Voron copy journal file {0}", entryName));
                }
                lastBackedupJournal = journalFile.Number;
            }
            if (env.Options.IncrementalBackupEnabled)
            {
                // Record progress so a later incremental backup starts after
                // everything this full backup captured.
                env.HeaderAccessor.Modify(header =>
                {
                    header->IncrementalBackup.LastBackedUpJournal = lastBackedupJournal;
                    //since we backed-up everything, no need to start next incremental backup from the middle
                    header->IncrementalBackup.LastBackedUpJournalPage = -1;
                });
            }
            backupSuccess = true;
        }
        catch (Exception)
        {
            backupSuccess = false;
            throw;
        }
        finally
        {
            var lastSyncedJournal = env.HeaderAccessor.Get(header => header->Journal).LastSyncedJournal;
            foreach (var journalFile in usedJournals)
            {
                if (backupSuccess) // if backup succeeded we can remove journals
                {
                    if (journalFile.Number < lastWrittenLogFile && // prevent deletion of the current journal and journals with a greater number
                        journalFile.Number < lastSyncedJournal) // prevent deletion of journals that aren't synced with the data file
                    {
                        journalFile.DeleteOnClose = true;
                    }
                }
                journalFile.Release();
            }
        }
    }
    finally
    {
        txr?.Dispose();
    }
}
/// <summary>
/// Fencing is not supported on a backup node: the attempt is logged and then
/// rejected unconditionally.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public override FenceResponse Fence(JournalInfo journalInfo, long epoch, string fencerInfo
    )
{
    Log.Info("Fenced by " + fencerInfo + " with epoch " + epoch);
    throw new NotSupportedException("BackupNode does not support fence");
}
/// <summary>
/// Signals that a journal request arrived from a node whose identity
/// (<paramref name="info"/>) is not registered with this name-node.
/// </summary>
public UnregisteredNodeException(JournalInfo info)
    : base("Unregistered server: " + info.ToString())
{
}