public void GetLatestRefundPaymentWithEarningEvent() { var paymentOne = new PaymentBuilder() .With(p => p.LearningStartDate, null) .Build(); var paymentTwo = new PaymentBuilder() .With(p => p.LearningStartDate, null) .With(p => p.LearnerReferenceNumber, "NotLearnerReferenceNumber").Build(); var payments = new List <Payment>() { paymentOne, paymentTwo, }; var recordKey = new RecordKey( PaymentBuilder.LearnerReferenceNumber, PaymentBuilder.LearnerUln, PaymentBuilder.LearningAimReference, PaymentBuilder.LearningStartDate, PaymentBuilder.ProgrammeType, PaymentBuilder.StandardCode, PaymentBuilder.FrameworkCode, PaymentBuilder.PathwayCode, PaymentBuilder.ReportingAimFundingLineType, PaymentBuilder.PriceEpisodeIdentifier, PaymentBuilder.ContractType); NewBuilder().GetLatestRefundPaymentWithEarningEventForRecord(recordKey, payments).Should().BeSameAs(paymentOne); }
public void T001_RangeKey_ContainmentTesting() { RecordKey target = new RecordKey().appendParsedKey("D"); RangemapManager.RangeKey segptr = RangemapManager.RangeKey.newSegmentRangeKey( new RecordKey().appendParsedKey("A"), new RecordKey().appendParsedKey("G"), 0); Assert.AreEqual(true, segptr.eventuallyContainsKey(target), "should be in segptr"); RangemapManager.RangeKey metasegptr = RangemapManager.RangeKey.newSegmentRangeKey( segptr.toRecordKey(), new RecordKey().appendParsedKey("Z"), 0); Assert.AreEqual(true,metasegptr.eventuallyContainsKey(target), "{0} should be in metasegptr {1}", target, metasegptr); RangemapManager.RangeKey segptr2 = RangemapManager.RangeKey.newSegmentRangeKey( new RecordKey().appendParsedKey("E"), new RecordKey().appendParsedKey("Z"), 0); Assert.AreEqual(false,segptr2.eventuallyContainsKey(target), "should not be in segptr2"); RangemapManager.RangeKey metasegptr2 = RangemapManager.RangeKey.newSegmentRangeKey( segptr2.toRecordKey(), new RecordKey().appendParsedKey("A"), 0); Assert.AreEqual(false,metasegptr2.eventuallyContainsKey(target), "should not be in metasegptr2"); // .zdata.index.jeske not in .ROOT.FREELIST.HEAD -> .zdata.index.</tr>.c:\EmailTest\Data\trakken-stats:6919.143 }
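The containment rule these assertions exercise can be shown with plain strings. Below is a minimal, self-contained sketch, assuming ordinal string comparison stands in for RecordKey ordering and a hypothetical SegmentPtr type stands in for RangemapManager.RangeKey: a pointer covers a target directly when low <= target <= high, and a meta-segment covers it "eventually" when it points at another segment whose range covers the target.

using System;

// Hypothetical stand-in for RangemapManager.RangeKey; not the real API.
class SegmentPtr
{
    public string Low, High;
    public SegmentPtr Child; // set when this meta-segment points at another segment

    public bool EventuallyContains(string target)
    {
        // direct containment: low <= target <= high
        if (string.CompareOrdinal(Low, target) <= 0 &&
            string.CompareOrdinal(target, High) <= 0)
            return true;
        // transitive containment through the nested segment pointer
        return Child != null && Child.EventuallyContains(target);
    }

    static void Main()
    {
        var segptr = new SegmentPtr { Low = "A", High = "G" };
        var metasegptr = new SegmentPtr { Low = "Q", High = "Z", Child = segptr };
        var segptr2 = new SegmentPtr { Low = "E", High = "Z" };
        Console.WriteLine(segptr.EventuallyContains("D"));     // True (direct)
        Console.WriteLine(metasegptr.EventuallyContains("D")); // True (via segptr)
        Console.WriteLine(segptr2.EventuallyContains("D"));    // False
    }
}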
public void Parse_Error() { ArgumentNullException exception = Assert.Throws <ArgumentNullException>(() => RecordKey.Parse(null)); Assert.Equal("keyData", exception.ParamName); // Invalid structure Assert.Throws <ArgumentOutOfRangeException>(() => RecordKey.Parse(new ByteString(Encoding.UTF8.GetBytes("/account/name/")))); // Unknown record type Assert.Throws <ArgumentOutOfRangeException>(() => RecordKey.Parse(new ByteString(Encoding.UTF8.GetBytes("/account/name/:DOESNOTEXIST:")))); // Incorrect number of additional components Assert.Throws <ArgumentOutOfRangeException>(() => RecordKey.Parse(new ByteString(Encoding.UTF8.GetBytes("/asset/name/:ACC:/other/:other")))); Assert.Throws <ArgumentOutOfRangeException>(() => RecordKey.Parse(new ByteString(Encoding.UTF8.GetBytes("/asset/name/:ACC")))); // Invalid path Assert.Throws <ArgumentOutOfRangeException>(() => RecordKey.Parse(new ByteString(Encoding.UTF8.GetBytes("account/name/:ACC:/")))); Assert.Throws <ArgumentOutOfRangeException>(() => RecordKey.Parse(new ByteString(Encoding.UTF8.GetBytes("/account/name/:ACC:account")))); }
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in C#: //ORIGINAL LINE: private <R extends org.neo4j.kernel.impl.store.record.AbstractBaseRecord> void verifyWriteAndRead(System.Func<RecordFormat<R>> formatSupplier, System.Func<org.neo4j.kernel.impl.store.format.RecordGenerators_Generator<R>> generatorSupplier, System.Func<RecordKey<R>> keySupplier, boolean assertPostReadOffset) throws java.io.IOException private void VerifyWriteAndRead <R>(System.Func <RecordFormat <R> > formatSupplier, System.Func <RecordGenerators_Generator <R> > generatorSupplier, System.Func <RecordKey <R> > keySupplier, bool assertPostReadOffset) where R : Org.Neo4j.Kernel.impl.store.record.AbstractBaseRecord { // GIVEN using (PagedFile storeFile = _pageCache.map(new File("store-" + Name.MethodName), _pageSize, CREATE)) { RecordFormat <R> format = formatSupplier(); RecordKey <R> key = keySupplier(); RecordGenerators_Generator <R> generator = generatorSupplier(); int recordSize = format.GetRecordSize(new IntStoreHeader(DATA_SIZE)); BatchingIdSequence idSequence = new BatchingIdSequence(_random.nextBoolean() ? IdSureToBeOnTheNextPage(_pageSize, recordSize) : 10); // WHEN long time = currentTimeMillis(); long endTime = time + TEST_TIME; long i = 0; for ( ; i < TEST_ITERATIONS && currentTimeMillis() < endTime; i++) { R written = generator.Get(recordSize, format, i % 5); R read = format.NewRecord(); try { WriteRecord(written, format, storeFile, recordSize, idSequence); ReadAndVerifyRecord(written, read, format, key, storeFile, recordSize, assertPostReadOffset); idSequence.Reset(); } catch (Exception t) { Exceptions.setMessage(t, t.Message + " : written:" + written + ", read:" + read + ", seed:" + _random.seed() + ", iteration:" + i); throw t; } } } }
public async Task <ActionResult> GetRecordsByName( [FromQuery(Name = "name")] string recordName, [FromQuery(Name = "type")] string recordType) { if (recordName == null) { return(BadRequest()); } RecordKey record; try { record = RecordKey.ParseRecord(recordType, LedgerPath.FromSegments(), recordName); } catch (ArgumentOutOfRangeException) { return(BadRequest()); } IReadOnlyList <Record> records = await this.indexes.GetAllRecords(record.RecordType, record.Name); return(Json(records.Select(GetRecordJson).ToArray())); }
public Task <IList <Record> > GetRecords(IEnumerable <ByteString> keys) { return(Task.FromResult <IList <Record> >(keys.Select(key => { RecordKey recordKey = RecordKey.Parse(key); if (recordKey.Name == "acl") { if (recordKey.Path.FullPath == "/root/subitem/") { return new Record(key, GetValidAcl(), ByteString.Empty); } else if (recordKey.Path.FullPath == "/root/invalid/") { return new Record(key, GetInvalidAcl(), ByteString.Empty); } else if (recordKey.Path.FullPath == "/root/comment/") { return new Record(key, GetCommentedAcl(), ByteString.Empty); } } return new Record(key, ByteString.Empty, ByteString.Empty); }) .ToList())); }
public async Task <ActionResult> GetTransactionsByPath( [FromQuery(Name = "path")] string path) { if (!LedgerPath.TryParse(path, out LedgerPath ledgerPath)) { return(BadRequest()); } var directory = LedgerPath.FromSegments(ledgerPath.Segments.ToArray()); var accounts = await this.store.GetSubaccounts(directory.FullPath); var keys = accounts.Where(x => RecordKey.Parse(x.Key).RecordType == RecordType.Account).Select(x => x.Key); var transactionsData = await this.storageEngine.GetTransactionByRecordKeys(keys, new TransactionFilter()); var transactions = transactionsData.Select(x => new ExtTransaction(x)).ToList(); var hashtable = new Hashtable(); foreach (var transaction in transactions) { foreach (var record in transaction.Mutation.Records) { var val = BitConverter.ToInt64(record.Value.Value.Reverse().ToArray(), 0); hashtable.Add(transaction.MutationHash + record.Key.ToString(), val); } } var res = transactions.Select(x => TransactionToJsonExt(x, hashtable).Value).ToArray(); return(Json(res)); }
/// <summary> /// Register a newly allocated block in the add store for use on rollback. Skips blocks already covered by the growth or recycle segment stores (when checkIfInGrowthSegments is set) and merges the block with a contiguous neighbor where possible. /// </summary> /// <param name="addStore"></param> /// <param name="fileGrowthStore"></param> /// <param name="recycledCollectionStore"></param> /// <param name="key"></param> /// <param name="blockSize"></param> /// <param name="checkIfInGrowthSegments"></param> internal static void RegisterAdd( Collections.Generic.ISortedDictionary <RecordKey, long> addStore, Collections.Generic.ISortedDictionary <RecordKey, long> fileGrowthStore, Collections.Generic.ISortedDictionary <RecordKey, long> recycledCollectionStore, RecordKey key, int blockSize, bool checkIfInGrowthSegments) { //** Check if Block is in Growth Segments if (checkIfInGrowthSegments && (RegionLogic.IsSegmentInStore(fileGrowthStore, key, blockSize) || RegionLogic.IsSegmentInStore(recycledCollectionStore, key, blockSize))) { return; } //** Add Block to AddStore for use on Rollback... if (!addStore.ContainsKey(key)) { short passCount = 0; //** Detect and merge contiguous blocks if (!addStore.MovePrevious()) { addStore.MoveFirst(); } while (!addStore.EndOfTree()) { var de = addStore.CurrentEntry; RecordKey k2 = de.Value.Key; long i = de.Value.Value; if (k2.ServerSystemFilename == key.ServerSystemFilename && k2.Filename == key.Filename && k2.CollectionName == key.CollectionName) { if (key.Address + blockSize == k2.Address) { long newSize = i + blockSize; addStore.Remove(de.Value.Key); k2.Address = key.Address; addStore.Add(k2, newSize); return; } if (k2.Address + i == key.Address) { addStore.CurrentValue = i + blockSize; return; } if (key.Address >= k2.Address && key.Address + blockSize <= k2.Address + i) { //** if block is inclusive, don't do anything... return; } } else if (++passCount >= 2) { break; } addStore.MoveNext(); } addStore.Add(key, blockSize); } }
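The merge logic above is easier to see without the cursor-style dictionary API. Here is a minimal sketch of the same contiguous-block merging, assuming a plain SortedDictionary<long, long> (start address -> block size) in place of ISortedDictionary<RecordKey, long> and ignoring the server/file/collection matching:

using System;
using System.Collections.Generic;
using System.Linq;

static class BlockMerge
{
    public static void Add(SortedDictionary<long, long> store, long address, int size)
    {
        foreach (var kv in store.ToList()) // snapshot: we mutate store inside the loop
        {
            long start = kv.Key, len = kv.Value;
            if (address + size == start)   // new block sits just before an entry: extend downward
            {
                store.Remove(start);
                store[address] = len + size;
                return;
            }
            if (start + len == address)    // new block sits just after an entry: extend upward
            {
                store[start] = len + size;
                return;
            }
            if (address >= start && address + size <= start + len)
                return;                    // fully contained: nothing to do
        }
        store[address] = size;             // no contiguous neighbor: record as-is
    }

    static void Main()
    {
        var store = new SortedDictionary<long, long>();
        Add(store, 1000, 512);
        Add(store, 1512, 512); // merges upward: entry 1000 -> 1024
        Add(store, 488, 512);  // merges downward: entry 488 -> 1536
        foreach (var kv in store) Console.WriteLine(kv.Key + " -> " + kv.Value); // 488 -> 1536
    }
}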
static void fetchHitsTest() { LayerManager db = new LayerManager(InitMode.RESUME, @"c:\EmailTest\DB"); Console.WriteLine("====================== FETCH HITS TEST ======================="); Console.WriteLine("====================== FETCH HITS TEST ======================="); Console.WriteLine("====================== FETCH HITS TEST ======================="); Console.WriteLine("====================== FETCH HITS TEST ======================="); Console.WriteLine("====================== FETCH HITS TEST ======================="); Console.WriteLine("====================== FETCH HITS TEST ======================="); Console.WriteLine("====================== FETCH HITS TEST ======================="); var kprefix = new RecordKey().appendParsedKey(".zdata/index/jeske"); var first_row = db.FindNext(kprefix, true); Console.WriteLine("First found key: {0}", first_row); #if false int count = 0; foreach (var hit in db.scanForward(new ScanRange<RecordKey>(kprefix, RecordKey.AfterPrefix(kprefix), null))) { Console.WriteLine(hit); count++; } Console.WriteLine("scanned {0} hits", count); #endif }
protected override async Task AddTransaction(long transactionId, byte[] mutationHash, Mutation mutation) { foreach (Record record in mutation.Records) { RecordKey key = RecordKey.Parse(record.Key); var dbRecord = await Records.Where(r => r.Key == record.Key.ToByteArray()).FirstOrDefaultAsync(); //TODO: check if dbrecord is null dbRecord.Type = key.RecordType; dbRecord.Name = key.Name; Context.Update(dbRecord); await Context.SaveChangesAsync(); var newMutation = new Models.RecordMutation { RecordKey = record.Key.ToByteArray(), TransactionId = transactionId, MutationHash = mutationHash }; RecordMutations.Add(newMutation); await Context.SaveChangesAsync(); } }
/// <summary> /// RegisterAdd will be called whenever a "new" block is allocated. /// Don't save block at this point as changes not saved yet. /// </summary> /// <param name="collection"></param> /// <param name="blockAddress"></param> /// <param name="blockSize"></param> protected internal override void RegisterAdd(CollectionOnDisk collection, long blockAddress, int blockSize) { if (IsTransactionStore(collection)) { ((TransactionBase)collection.ParentTransactionLogger).RegisterAdd(collection, blockAddress, blockSize); return; } if (LogCollection == null) { return; } RecordKey key = CreateKey(collection, blockAddress); //** Check if Block is in Growth Segments if (RegionLogic.IsSegmentInStore(_fileGrowthStore, key, blockSize) || RegionLogic.IsSegmentInStore(_recycledSegmentsStore, key, blockSize)) { if (_inCommit == 0) { TrackModification(collection.GetTopParent()); } return; } RegisterAdd(_addBlocksStore, _fileGrowthStore, _recycledSegmentsStore, collection, blockAddress, blockSize, false); if (_inCommit == 0) { TrackModification(collection.GetTopParent()); } }
public void GetLearningDeliveryForRecordKey_NoMatch() { var nonMatchingLearningDelivery = new LearningDeliveryBuilder() .With(ld => ld.LearnAimRef, "NotLearnAimRef") .Build(); var learningDeliveries = new List <LearningDelivery>() { nonMatchingLearningDelivery, }; var learner = new LearnerBuilder() .With(l => l.LearningDeliveries, learningDeliveries) .Build(); var recordKey = new RecordKey( "1", 123456789, "LearnAimRef", new DateTime(2020, 8, 1), 20, 40, 10, 30, "ReportingAimFundingLineType", "PriceEpisodeIdentifier", 1); NewBuilder().GetLearnerLearningDeliveryForRecord(learner, recordKey).Should().BeNull(); }
protected override async Task AddTransaction(long transactionId, byte[] mutationHash, Mutation mutation) { foreach (Record record in mutation.Records) { RecordKey key = RecordKey.Parse(record.Key); await ExecuteAsync(@" UPDATE Records SET Type = @type, Name = @name WHERE Key = @key", new Dictionary <string, object>() { ["@key"] = record.Key.ToByteArray(), ["@type"] = (int)key.RecordType, ["@name"] = key.Name }); await ExecuteAsync(@" INSERT INTO RecordMutations (RecordKey, TransactionId, MutationHash) VALUES (@recordKey, @transactionId, @mutationHash)", new Dictionary <string, object>() { ["@recordKey"] = record.Key.ToByteArray(), ["@transactionId"] = transactionId, ["@mutationHash"] = mutationHash }); } }
public void GetPriceEpisodeStartDateForRecord_InvalidDate() { var priceEpisodeIdentifier = "ThisIsNotADate"; var recordKey = new RecordKey(null, 1, "ZPROG001", null, 1, 1, 1, 1, null, priceEpisodeIdentifier, 1); NewBuilder().GetPriceEpisodeStartDateForRecord(recordKey).Should().BeNull(); }
public void GetPriceEpisodeStartDateForRecord_Length() { var priceEpisodeIdentifier = "TooShort"; var recordKey = new RecordKey(null, 1, "ZPROG001", null, 1, 1, 1, 1, null, priceEpisodeIdentifier, 1); NewBuilder().GetPriceEpisodeStartDateForRecord(recordKey).Should().BeNull(); }
public Earning GetEarningForRecord(RecordKey recordKey, IEnumerable <Payment> paymentsInRow, IEnumerable <Payment> allPayments, IDictionary <Guid, Earning> earningsLookup) { var latestPaymentWithEarningEvent = GetLatestPaymentWithEarningEvent(paymentsInRow) ?? GetLatestRefundPaymentWithEarningEventForRecord(recordKey, allPayments); return(GetEarningForPayment(earningsLookup, latestPaymentWithEarningEvent)); }
public void FromRecord_InvalidRecordType() { RecordKey key = new RecordKey(RecordType.Data, LedgerPath.Parse("/path/"), "name"); Record record = new Record(key.ToBinary(), ByteString.Empty, binaryData[1]); ArgumentOutOfRangeException exception = Assert.Throws<ArgumentOutOfRangeException>(() => AccountStatus.FromRecord(key, record)); Assert.Equal("key", exception.ParamName); }
public void GetPriceEpisodeStartDateForRecord() { var priceEpisodeIdentifier = "SomeStuff-31/12/2019"; var recordKey = new RecordKey(null, 1, "ZPROG001", null, 1, 1, 1, 1, null, priceEpisodeIdentifier, 1); NewBuilder().GetPriceEpisodeStartDateForRecord(recordKey).Should().Be(new DateTime(2019, 12, 31)); }
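Taken together, the three GetPriceEpisodeStartDateForRecord tests pin down the expected behavior: a valid identifier ends in a dd/MM/yyyy date, an identifier shorter than a date yields null, and an unparsable suffix yields null. A plausible sketch consistent with those tests follows; the builder's real implementation may differ.

using System;
using System.Globalization;

static class PriceEpisodeDates
{
    // Hypothetical helper mirroring GetPriceEpisodeStartDateForRecord's observed behavior.
    public static DateTime? GetStartDate(string priceEpisodeIdentifier)
    {
        const int dateLength = 10; // "dd/MM/yyyy"
        if (priceEpisodeIdentifier == null || priceEpisodeIdentifier.Length < dateLength)
            return null;
        string suffix = priceEpisodeIdentifier.Substring(priceEpisodeIdentifier.Length - dateLength);
        return DateTime.TryParseExact(suffix, "dd/MM/yyyy", CultureInfo.InvariantCulture,
                                      DateTimeStyles.None, out DateTime result)
            ? result
            : (DateTime?)null;
    }

    static void Main()
    {
        Console.WriteLine(GetStartDate("SomeStuff-31/12/2019")); // 31/12/2019
        Console.WriteLine(GetStartDate("ThisIsNotADate"));       // blank (null)
        Console.WriteLine(GetStartDate("TooShort"));             // blank (null)
    }
}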
protected override MongoDbRecord BuildMongoDbRecord(Record rec) { RecordKey key = RecordKey.Parse(rec.Key); var r = new MongoDbRecord { Key = rec.Key.ToByteArray(), KeyS = Encoding.UTF8.GetString(rec.Key.ToByteArray()), Value = rec.Value?.ToByteArray(), Version = rec.Version.ToByteArray(), Path = key.Path.Segments.ToArray(), Type = key.RecordType, Name = key.Name }; return(r); }
public void Parse_Data() { ByteString data = new ByteString(Encoding.UTF8.GetBytes("/aka/name/:DATA:record:name")); RecordKey key = RecordKey.Parse(data); Assert.Equal(RecordType.Data, key.RecordType); Assert.Equal("/aka/name/", key.Path.FullPath); Assert.Equal("record:name", key.Name); }
public void Parse_Account() { ByteString data = new ByteString(Encoding.UTF8.GetBytes("/account/name/:ACC:/asset/name/")); RecordKey key = RecordKey.Parse(data); Assert.Equal(RecordType.Account, key.RecordType); Assert.Equal("/account/name/", key.Path.FullPath); Assert.Equal("/asset/name/", key.Name); }
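The two positive tests, together with Parse_Error earlier, imply the serialized key layout: a path ending in '/', a record-type marker between colons (ACC, DATA, and so on), then the name, which may itself contain colons or be a path. Below is a simplified, self-contained splitter illustrating that layout; the real RecordKey.Parse validates considerably more (known record types, component counts, leading '/').

using System;

static class KeyLayout
{
    public static (string Path, string Type, string Name) Split(string keyData)
    {
        string[] parts = keyData.Split(new[] { ':' }, 3); // at most 3 parts: the name may contain ':'
        if (parts.Length != 3 || !parts[0].EndsWith("/"))
            throw new ArgumentOutOfRangeException(nameof(keyData));
        return (parts[0], parts[1], parts[2]);
    }

    static void Main()
    {
        Console.WriteLine(Split("/account/name/:ACC:/asset/name/")); // (/account/name/, ACC, /asset/name/)
        Console.WriteLine(Split("/aka/name/:DATA:record:name"));     // (/aka/name/, DATA, record:name)
    }
}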
public async Task AddTransactions(IEnumerable <ByteString> transactions) { using (SqlTransaction context = Connection.BeginTransaction(IsolationLevel.Snapshot)) { foreach (ByteString rawTransaction in transactions) { byte[] rawTransactionBuffer = rawTransaction.ToByteArray(); Transaction transaction = MessageSerializer.DeserializeTransaction(rawTransaction); byte[] transactionHash = MessageSerializer.ComputeHash(rawTransactionBuffer); byte[] mutationHash = MessageSerializer.ComputeHash(transaction.Mutation.ToByteArray()); Mutation mutation = MessageSerializer.DeserializeMutation(transaction.Mutation); IReadOnlyList <Record> conflicts = await ExecuteQuery <Record>( "EXEC [Openchain].[AddTransaction] @instance, @transactionHash, @mutationHash, @rawData, @records;", reader => mutation.Records.First(record => record.Key.Equals(new ByteString((byte[])reader[0]))), new Dictionary <string, object>() { ["instance"] = this.instanceId, ["transactionHash"] = transactionHash, ["mutationHash"] = mutationHash, ["rawData"] = rawTransactionBuffer, ["type:records"] = "Openchain.RecordMutationTable", ["records"] = mutation.Records.Select(record => { SqlDataRecord result = new SqlDataRecord(recordMutationMetadata); RecordKey key = ParseRecordKey(record.Key); result.SetBytes(0, 0, record.Key.ToByteArray(), 0, record.Key.Value.Count); if (record.Value == null) { result.SetDBNull(1); } else { result.SetBytes(1, 0, record.Value.ToByteArray(), 0, record.Value.Value.Count); } result.SetBytes(2, 0, record.Version.ToByteArray(), 0, record.Version.Value.Count); result.SetString(3, key.Name); result.SetByte(4, (byte)key.RecordType); return(result); }).ToList() }, context); if (conflicts.Count > 0) { throw new ConcurrentMutationException(conflicts[0]); } } context.Commit(); } }
// BR2 - UKPRN and LearnRefNumber are implicitly matched, not included on models public LearningDelivery GetLearnerLearningDeliveryForRecord(Learner learner, RecordKey recordKey) => learner? .LearningDeliveries? .FirstOrDefault(ld => ld.ProgType == recordKey.ProgrammeType && ld.StdCode == recordKey.StandardCode && ld.FworkCode == recordKey.FrameworkCode && ld.PwayCode == recordKey.PathwayCode && ld.LearnStartDate == recordKey.LearnStartDate && ld.LearnAimRef.CaseInsensitiveEquals(recordKey.LearningAimReference));
public async Task <bool> ExistsAsync(RecordKey key) { var result = await FindAsync(new GetItemRequest(tableName, key) { ConsistentRead = false, ReturnConsumedCapacity = false, AttributesToGet = key.Select(k => k.Key).ToArray() }).ConfigureAwait(false); return(result != null); }
internal static RecordKey CreateKey(ICollectionOnDisk collection, long address) { var key = new RecordKey(); key.ServerSystemFilename = collection.File.Server.Filename; key.Filename = collection.File.Filename; key.CollectionName = collection.Name; key.Address = address; return(key); }
/// <summary> /// Back up data of a certain disk region onto the transaction log file /// </summary> internal void BackupData(List <KeyValuePair <RecordKey, Region> > dataRegions, ConcurrentIOPoolManager readPool, ConcurrentIOPoolManager writePool) { LogTracer.Verbose("BackupData: Start for Thread {0}.", Thread.CurrentThread.ManagedThreadId); foreach (KeyValuePair <RecordKey, Region> dataRegion in dataRegions) { RecordKey key = dataRegion.Key; Region region = dataRegion.Value; var f = (OnDisk.File.IFile)Server.GetFile(key.Filename); string fFilename = key.Filename; //** foreach disk area in region, copy it to transaction file foreach (KeyValuePair <long, int> area in region) { // short circuit if IO exception was detected. if (readPool.AsyncThreadException != null) { throw readPool.AsyncThreadException; } if (writePool.AsyncThreadException != null) { throw writePool.AsyncThreadException; } var logKey = new BackupDataLogKey(); logKey.SourceFilename = f == null ? fFilename : f.Filename; logKey.SourceDataAddress = area.Key; IEnumerable <KeyValuePair <BackupDataLogKey, BackupDataLogValue> > intersectingLogs; long mergedBlockStartAddress, mergedBlockSize; // todo: optimize LogCollection locking! //LogCollection.Locker.Lock(); LogTracer.Verbose("Transaction.BackupData: Thread {0}, Locking LogCollection, count {1}.", Thread.CurrentThread.ManagedThreadId, LogCollection.Count); bool isIntersectingLogs = GetIntersectingLogs(logKey, area.Value, out intersectingLogs, out mergedBlockStartAddress, out mergedBlockSize); if (isIntersectingLogs) { BackupDataWithIntersection(intersectingLogs, logKey, area, f, fFilename, readPool, writePool, key); } else { BackupDataWithNoIntersection(intersectingLogs, logKey, area, f, fFilename, readPool, writePool, key); } LogTracer.Verbose("Transaction.BackupData: Thread {0}, Unlocking LogCollection, count {1}.", Thread.CurrentThread.ManagedThreadId, LogCollection.Count); //LogCollection.Locker.Unlock(); } } }
protected internal override void RegisterRecycleCollection(CollectionOnDisk collection, long blockAddress, int blockSize) { RecordKey key = CreateKey(collection, blockAddress); if (!_recycledCollectionStore.ContainsKey(key)) { RegisterFileGrowth(_recycledCollectionStore, collection, blockAddress, blockSize, true); } }
override internal protected void TrackModification(CollectionOnDisk collection, bool untrack = false) { CollectionOnDisk p = collection; // Collection.GetTopParent(); RecordKey key = CreateKey(p); if (!untrack) { ModifiedCollections[key] = p; return; } ModifiedCollections.Remove(key); }
internal static void AddMerge( Collections.Generic.ISortedDictionary <RecordKey, long> addStore, RecordKey key, int blockSize) { addStore.Locker.Invoke(() => { // Add Block to AddStore for use on Rollback... if (!addStore.ContainsKey(key)) { short passCount = 0; // Detect and merge contiguous blocks if (!addStore.MovePrevious()) { addStore.MoveFirst(); } while (!addStore.EndOfTree()) { var de = addStore.CurrentEntry; RecordKey k2 = de.Value.Key; long i = de.Value.Value; if (k2.ServerSystemFilename == key.ServerSystemFilename && k2.Filename == key.Filename && k2.CollectionName == key.CollectionName) { if (key.Address + blockSize == k2.Address) { long newSize = i + blockSize; addStore.Remove(de.Value.Key); k2.Address = key.Address; addStore.Add(k2, newSize); return; } if (k2.Address + i == key.Address) { addStore[de.Value.Key] = i + blockSize; //addStore.CurrentValue = i + blockSize; return; } if (key.Address >= k2.Address && key.Address + blockSize <= k2.Address + i) { // if block is inclusive, don't do anything... return; } } else if (++passCount >= 2) { break; } addStore.MoveNext(); } addStore.Add(key, blockSize); } }); }
private void BackupDataWithNoIntersection( IEnumerable <KeyValuePair <BackupDataLogKey, BackupDataLogValue> > intersectingLogs, BackupDataLogKey logKey, KeyValuePair <long, int> area, OnDisk.File.IFile f, string fFilename, ConcurrentIOPoolManager readPool, ConcurrentIOPoolManager writePool, RecordKey key) { string systemBackupFilename = Server.Path + DataBackupFilename; int size = area.Value; key.Address = area.Key; // no intersection nor mergeable logs, add new log! backup and log the data area ConcurrentIOData reader = f != null ? readPool.GetInstance(f, size) : readPool.GetInstance(fFilename, null, size); ConcurrentIOData writer = writePool.GetInstance(systemBackupFilename, (TransactionRoot)Root); if (reader == null || writer == null) { throw new SopException("This program has a bug! Didn't get a reader or writer from the Async IO Pool."); } LogTracer.Verbose("BackupDataWithNoIntersection: Start for Thread {0}.", Thread.CurrentThread.ManagedThreadId); var logValue = new BackupDataLogValue(); logValue.DataSize = size; logValue.TransactionId = Id; logValue.BackupFileHandle = GetLogBackupFileHandle(DataBackupFilename); // return the current backup file size and grow it to make room for data to be backed up... logValue.BackupDataAddress = GrowBackupFile(size, writer.FileStream); // save a record of the backed up data.. LogCollection.Add(logKey, logValue); // log after data was backed up!! Sop.VoidFunc logBackedupData = () => { UpdateLogger.LogLine("{0}{1}:{2} to {3}:{4} Size={5}", BackupFromToken, f != null ? f.Filename : fFilename, area.Key, DataBackupFilename, logValue.BackupDataAddress, size); }; writer.FileStream.Seek(logValue.BackupDataAddress, SeekOrigin.Begin, true); reader.FileStream.Seek(area.Key, SeekOrigin.Begin, true); reader.FileStream.BeginRead( reader.Buffer, 0, size, ReadCallback, new object[] { new[] { reader, writer }, true, logKey, logBackedupData }); }
public Task <IReadOnlyList <Record> > GetRecords(IEnumerable <ByteString> keys) { return(Task.FromResult <IReadOnlyList <Record> >(keys.Select(key => { RecordKey recordKey = RecordKey.Parse(key); return new Record( key, new ByteString(BitConverter.GetBytes(this.accounts[recordKey.Path.FullPath]).Reverse()), ByteString.Empty); }) .ToList())); }
internal static void RegisterAdd( Collections.Generic.ISortedDictionary <RecordKey, long> addStore, Collections.Generic.ISortedDictionary <RecordKey, long> fileGrowthStore, Collections.Generic.ISortedDictionary <RecordKey, long> recycledCollectionStore, CollectionOnDisk collection, long blockAddress, int blockSize, bool checkIfInGrowthSegments) { RecordKey key = CreateKey(collection, blockAddress); RegisterAdd(addStore, fileGrowthStore, recycledCollectionStore, key, blockSize, checkIfInGrowthSegments); }
public void T001_RangeKey_Bug() { // .zdata.index.jeske not in .ROOT.FREELIST.HEAD -> .zdata.index.</tr>.c:\EmailTest\Data\trakken-stats:6919.143 RecordKey target = new RecordKey().appendParsedKey(".zdata/index/jeske"); RecordKey lowkey = new RecordKey().appendParsedKey(".ROOT/FREELIST/HEAD"); RecordKey highkey = new RecordKey().appendParsedKey(".zdata/index/<tr>"); RangemapManager.RangeKey segptr = RangemapManager.RangeKey.newSegmentRangeKey( lowkey, highkey, 0); Assert.AreEqual(true, segptr.eventuallyContainsKey(target), "bug failed"); }
public void T000_RangeKey_EncodeDecode() { RecordKey a_key = new RecordKey().appendParsedKey("AAAAAAAAAA/ZZZZZZZZZZZZZZ"); RecordKey b_key = new RecordKey().appendParsedKey("B/ZZ"); var rangekey_preencode = RangemapManager.RangeKey.newSegmentRangeKey(a_key, b_key, 0); RecordKey a_range = rangekey_preencode.toRecordKey(); var rangekey = RangemapManager.RangeKey.decodeFromRecordKey(a_range); Assert.AreEqual(rangekey.lowkey, a_key, "firstkey mismatch"); Assert.AreEqual(rangekey.highkey, b_key, "lastkey mismatch"); }
/// <summary> /// Creates an instance of the <see cref="AccountStatus"/> class from an unparsed record. /// </summary> /// <param name="key">The key of the record.</param> /// <param name="record">The record to create the object from.</param> /// <returns>A new instance of the <see cref="AccountStatus"/> class.</returns> public static AccountStatus FromRecord(RecordKey key, Record record) { if (key.RecordType != RecordType.Account) throw new ArgumentOutOfRangeException(nameof(key)); long amount; if (record.Value.Value.Count == 0) amount = 0; else if (record.Value.Value.Count == 8) amount = BitConverter.ToInt64(record.Value.Value.Reverse().ToArray(), 0); else throw new ArgumentOutOfRangeException(nameof(record)); return new AccountStatus(new AccountKey(key.Path, LedgerPath.Parse(key.Name)), amount, record.Version); }
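The value encoding FromRecord expects (and which the GetRecords mock above produces with BitConverter.GetBytes(...).Reverse()) is an eight-byte big-endian signed amount, with an empty value meaning zero. A round-trip sketch, assuming a little-endian host so that Reverse() yields big-endian:

using System;
using System.Linq;

static class AmountCodec
{
    public static byte[] Encode(long amount) =>
        BitConverter.GetBytes(amount).Reverse().ToArray(); // little-endian host -> big-endian bytes

    public static long Decode(byte[] value) =>
        value.Length == 0 ? 0L : BitConverter.ToInt64(value.Reverse().ToArray(), 0);

    static void Main()
    {
        byte[] encoded = Encode(1000);
        Console.WriteLine(BitConverter.ToString(encoded)); // 00-00-00-00-00-00-03-E8
        Console.WriteLine(Decode(encoded));                // 1000
        Console.WriteLine(Decode(new byte[0]));            // 0 (empty value means zero)
    }
}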
public void T000_RangeKey_EncodedSort() { RecordKey a_key = new RecordKey().appendParsedKey("AAAAAAA/ZZZZZZZZZ/s/s/s/s/s"); RecordKey b_key = new RecordKey().appendParsedKey("B/ZZ"); Assert.True(a_key.CompareTo(b_key) < 0, "a key should be less than b key"); RecordKey a_range = RangemapManager.RangeKey.newSegmentRangeKey(a_key, a_key, 0).toRecordKey(); RecordKey b_range = RangemapManager.RangeKey.newSegmentRangeKey(b_key, b_key, 0).toRecordKey(); Console.WriteLine(Lsd.ToHexString(a_range.encode())); Console.WriteLine(Lsd.ToHexString(b_range.encode())); Assert.True(a_range.CompareTo(b_range) < 0, "a range should also be less than b range!!"); Assert.True(b_range.CompareTo(a_range) > 0, "b range should be greater than a range!!"); }
private void debugDump(ISortedSegment seg, String indent, HashSet<string> seenGenerations) { HashSet<string> nextSeenGenerations = new HashSet<string>(seenGenerations); RecordKey genkey = new RecordKey().appendParsedKey(".ROOT/GEN"); // first, print all our keys foreach (KeyValuePair<RecordKey, RecordUpdate> kvp in seg.sortedWalk()) { String value_str = kvp.Value.ToString(); if (value_str.Length < 50) { Console.WriteLine(indent + kvp.Key + " : " + value_str + " "); } else { Console.WriteLine(indent + kvp.Key + " : " + value_str.Substring(0, 10) + "..[" + (value_str.Length - 40) + " more bytes]"); } if (kvp.Key.isSubkeyOf(genkey)) { nextSeenGenerations.Add(kvp.Key.ToString()); } } // second, walk the rangemap foreach (KeyValuePair<RecordKey, RecordUpdate> kvp in seg.sortedWalk()) { // see if this is a range key (i.e. .ROOT/GEN/###/</> ) // .. if so, recurse if (kvp.Key.isSubkeyOf(genkey) && kvp.Value.type == RecordUpdateTypes.FULL) { if (seenGenerations.Contains(kvp.Key.ToString())) { Console.WriteLine("--- Skipping Tombstoned layer for Key " + kvp.Key.ToString()); } else { Console.WriteLine("--- Layer for Keys: " + kvp.Key.ToString()); ISortedSegment newseg = rangemapmgr.getSegmentFromMetadata(kvp.Value); debugDump(newseg, indent + " ", nextSeenGenerations); } } } }
public void T12_RecordKeyDecodePerfTest() { RecordKey rk = new RecordKey().appendParsedKey(".data/test/unpack/with/lots/of/parts"); int NUM_ITERATIONS = 100000; // encode test { GC.Collect(); DateTime start = DateTime.Now; for (int x = 0; x < NUM_ITERATIONS; x++) { byte[] data = rk.encode(); } DateTime end = DateTime.Now; double elapsed_s = (end - start).TotalMilliseconds / 1000.0; double rec_per_s = (double)NUM_ITERATIONS / elapsed_s; Console.WriteLine("packed {0} record keys in {1} seconds, {2} keys/second", NUM_ITERATIONS, elapsed_s, rec_per_s); Assert.Less(500000, rec_per_s, "minimum records per second of pack"); } // decode test { byte[] data = rk.encode(); GC.Collect(); DateTime start = DateTime.Now; for (int x = 0; x < NUM_ITERATIONS; x++) { RecordKey unpacked = new RecordKey(data); } DateTime end = DateTime.Now; double elapsed_s = (end - start).TotalMilliseconds / 1000.0; double rec_per_s = (double)NUM_ITERATIONS / elapsed_s; Console.WriteLine("unpacked {0} record keys in {1} seconds, {2} keys/second", NUM_ITERATIONS, elapsed_s, rec_per_s); Assert.Less(500000, rec_per_s, "minimum records per second of unpack"); } }
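One caveat about the timing loops above: DateTime.Now has coarse resolution (roughly 15 ms on Windows), which adds noise to sub-second measurements. Below is a sketch of the same pattern using System.Diagnostics.Stopwatch, which reads the high-resolution performance counter, shown with a trivial stand-in workload:

using System;
using System.Diagnostics;

static class Timing
{
    static void Main()
    {
        const int NUM_ITERATIONS = 100000;
        long sink = 0;                 // keeps the loop from being optimized away
        var sw = Stopwatch.StartNew();
        for (int x = 0; x < NUM_ITERATIONS; x++)
        {
            sink += x;                 // stand-in for rk.encode()
        }
        sw.Stop();
        double rec_per_s = NUM_ITERATIONS / sw.Elapsed.TotalSeconds;
        Console.WriteLine("{0} iterations in {1:F3}s, {2:F0}/second (sink={3})",
            NUM_ITERATIONS, sw.Elapsed.TotalSeconds, rec_per_s, sink);
    }
}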
public void T01_RecordOffsetList_sortedWalk() { string[] testvalues = { "test/1", "test/2", "test/3" }; byte[] databuffer; // encode a buffer { MemoryStream ms = new MemoryStream(); // add some values to the block encoder SegmentBlockEncoderRecordOffsetList enc = new SegmentBlockEncoderRecordOffsetList(); enc.setStream(ms); for (int i = 0; i < testvalues.Length; i++) { RecordKey tkey = new RecordKey().appendParsedKey(testvalues[i]); RecordUpdate tupdate = RecordUpdate.WithPayload("data: " + testvalues[i]); enc.add(tkey, tupdate); } enc.flush(); databuffer = ms.ToArray(); } Console.WriteLine("databuffer len : " + databuffer.Length); Console.WriteLine("Hex: " + Lsd.ToHexString(databuffer)); // test sortedWalk { BlockAccessor rs = new BlockAccessor(databuffer); var decoder = new SegmentBlockDecoderRecordOffsetList(rs); int count = 0; foreach (var row in decoder.sortedWalk()) { Console.WriteLine(row); count++; } Assert.AreEqual(testvalues.Length, count, "wrong number of elements in sorted walk"); } }
public ReplHandler(IStepsSnapshotKVDB db, ServerContext ctx) { this.next_stage = db; this.my_repl_interface = new MyReplConnection(this); this.rnd = new Random(); this.pusher = new ReplPusher(this); this.ctx = ctx; try { var di_rk = new RecordKey() .appendKeyPart("_config") .appendKeyPart("DATA-INSTANCE-ID"); var rec = db.FindNext(di_rk, true); if (di_rk.CompareTo(rec.Key) != 0) { throw new Exception( String.Format("ReplHandler {0} , not able to fetch DATA-INSTANCE-ID", ctx.server_guid)); } this.data_instance_id = rec.Value.ToString(); Console.WriteLine("ReplHandler - {0}: data_instance_id {1}", ctx.server_guid, data_instance_id); } catch (KeyNotFoundException) { throw new Exception("no data instance ID, try InitResume or InitJoin"); } // check server_guid matches? // register ourself ctx.connector.registerServer(ctx.server_guid, this.my_repl_interface); // startup our background task worker = new Thread(delegate() { this.workerThread(); }); worker.Start(); }
public void setValue(RecordKey skey, RecordUpdate supdate) { checkActive(); // (1) write our repl log entry DateTime now = DateTime.Now; long logstamp = id_gen.nextTimestamp(); RecordKey logkey = new RecordKey() .appendKeyPart("_logs") .appendKeyPart(ctx.server_guid) .appendKeyPart(new RecordKeyType_Long(logstamp)); // (1.1) pack the key/value together into the log entry byte[] packed_update; { MemoryStream writer = new MemoryStream(); // TODO: this seems like a really inefficient way to write out a key ISegmentBlockEncoder encoder = new SegmentBlockBasicEncoder(); encoder.setStream(writer); encoder.add(skey, supdate); encoder.flush(); packed_update = writer.ToArray(); } RecordUpdate logupdate = RecordUpdate.WithPayload(packed_update); Console.WriteLine("writing log entry: {0} -> [ {1} = {2} ]", logkey, skey, supdate); next_stage.setValue(logkey, logupdate); // (2) trigger the repl notifier that there is a new entry to push pusher.wakeUpLogSleepers(); // (3) write the record key Console.WriteLine("writing data entry: {0} = {1}", skey, supdate); RecordKey private_record_key = new RecordKey() .appendKeyPart("_data"); foreach (var part in skey.key_parts) { private_record_key.appendKeyPart(part); } next_stage.setValue(private_record_key, supdate); }
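The two writes above define the replicated keyspace layout: the packed update goes under _logs/<server guid>/<logstamp>, and the plain value goes under _data/<original key parts>. A string-level illustration with hypothetical values (the real keys are structured RecordKey parts, not slash-joined strings):

using System;
using System.Collections.Generic;

static class ReplLayout
{
    static void Main()
    {
        var store = new SortedDictionary<string, string>(StringComparer.Ordinal);
        string server_guid = "srv-1";  // assumed guid
        long logstamp = 2384;          // assumed value from id_gen.nextTimestamp()
        string key = "users/alice", value = "v1";

        // (1) the log entry: key and value packed together under _logs/<guid>/<logstamp>
        store["_logs/" + server_guid + "/" + logstamp.ToString("D10")] = key + "=" + value;
        // (2) the data entry: the value itself under _data/<key>
        store["_data/" + key] = value;

        foreach (var kv in store) Console.WriteLine(kv.Key + " -> " + kv.Value);
    }
}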
public JoinInfo requestToJoin(string server_guid) { // (1) record his guid next_stage.setValue(new RecordKey() .appendKeyPart("_config").appendKeyPart("seeds").appendKeyPart(server_guid), RecordUpdate.WithPayload("")); // (2) send him our instance ID and a list of seeds var ji = new JoinInfo(); ji.data_instance_id = this.data_instance_id; ji.seed_servers = new List<string>(); var seed_key_prefix = new RecordKey() .appendKeyPart("_config") .appendKeyPart("seeds"); foreach (var row in next_stage.scanForward(new ScanRange<RecordKey>(seed_key_prefix, RecordKey.AfterPrefix(seed_key_prefix), null))) { string sname = ((RecordKeyType_String)row.Key.key_parts[row.Key.key_parts.Count - 1]).GetString(); ji.seed_servers.Add(sname); } // add ourself to the seed list! if (!ji.seed_servers.Contains(this.ctx.server_guid)) { ji.seed_servers.Add(this.ctx.server_guid); } return ji; }
public static void Block_Perftest(IBlockTestFactory factory) { // iterate through blocksizes, randomly generating input data, and then doing some // random key queries to see how fast retrieval is int[] block_sizes = { 2 * 1024, 40 * 1024, 100 * 1024, 512 * 1024, 2 * 1024 * 1024 }; int[] value_sizes = { 10, 30, 100, 1000, 10000 }; int[] num_levels = { 2, 3, 4 }; int[,] perf_results = new int[block_sizes.Length, value_sizes.Length]; int READ_COUNT = 3000; Random rnd = new Random((int)DateTime.Now.ToBinary()); foreach (int key_part_count in num_levels) { System.Console.WriteLine("--"); foreach (int block_size in block_sizes) { foreach (int value_size in value_sizes) { if (value_size > (block_size / 8)) { // we want at least 8 values continue; } System.GC.Collect(); // setup the block for encoding ISegmentBlockEncoder enc = factory.makeEncoder(); MemoryStream ms = new MemoryStream(); enc.setStream(ms); int curblock_size = 0; // do the sorted block create.. we nest it so we can dispose the SkipList { var sorted_input = new BDSkipList<RecordKey, RecordUpdate>(); // first create the sorted input while (curblock_size < block_size) { // generate a random key RecordKey key = new RecordKey(); for (int i = 0; i < key_part_count; i++) { key.appendParsedKey("" + rnd.Next(0xFFFFFF) + rnd.Next(0xFFFFFF) + rnd.Next(0xFFFFFF)); } // generate a random value byte[] data = new byte[value_size]; for (int i = 0; i < value_size; i++) { data[i] = (byte)rnd.Next(40, 50); } RecordUpdate upd = RecordUpdate.WithPayload(data); curblock_size += key.encode().Length; curblock_size += value_size; sorted_input.Add(key, upd); } // encode the block foreach (var kvp in sorted_input) { enc.add(kvp.Key, kvp.Value); } enc.flush(); sorted_input = null; // free the skiplist } // init the decoder ISegmentBlockDecoder dec = factory.makeDecoder(new BlockAccessor(ms.ToArray())); int num_misses = 0; System.GC.Collect(); // force GC so it may not happen during the test // perform random access test DateTime start = DateTime.Now; for (int i = 0; i < READ_COUNT; i++) { RecordKey key = new RecordKey(); for (int ki = 0; ki < key_part_count; ki++) { key.appendParsedKey("" + rnd.Next(8) + rnd.Next(0xFFFFFF) + rnd.Next(0xFFFFFF)); } try { dec.FindNext(key, true); } catch (KeyNotFoundException) { num_misses++; // System.Console.WriteLine("misfetch: {0}", key); // no problem, but this should be small } } double duration_ms = (DateTime.Now - start).TotalMilliseconds; double reads_per_second = (READ_COUNT * 1000.0) / (duration_ms); System.Console.WriteLine("BlockSize src{0,10} final{6,10} ratio ({7:0.000}), ValueSize {1,6}, Keyparts {5,3}, {2,6} reads in {3,10:0.0}ms, {8,6} misses, {4,9:0.00} read/sec", curblock_size, value_size, READ_COUNT, duration_ms, reads_per_second, key_part_count, ms.Length, ((double)ms.Length / (double)curblock_size) * (double)100.0, num_misses); } } } }
public void setValueParsed(string skey, string svalue) { RecordKey key = new RecordKey(); key.appendParsedKey(skey); RecordUpdate update = RecordUpdate.WithPayload(svalue); this.setValue(key, update); }
private LogEntry _decodeLogEntry(RecordKey key, RecordData data) { var le = new LogEntry(); if (!((RecordKeyType_String)key.key_parts[0]).GetString().Equals("_logs")) { throw new Exception("_decodeLogEntry: handed non-log entry: " + key.ToString()); } le.server_guid = ((RecordKeyType_String)key.key_parts[1]).GetString(); le.logstamp = ((RecordKeyType_Long)key.key_parts[2]).GetLong(); le.data = data.data; return le; }
public static void T01_BlockEncodeDecodeTest() { MemoryStream ms = new MemoryStream(); byte[] testdata = { 0x00, 0x10, 0x78, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00 }; // init an encoder and add one key which requires escaping { ISegmentBlockEncoder enc = new SegmentBlockBasicEncoder(); enc.setStream(ms); RecordKey key = new RecordKey().appendParsedKey("TESTSETEST"); RecordUpdate update = RecordUpdate.WithPayload(testdata); enc.add(key, update); enc.flush(); ms.Flush(); System.Console.WriteLine("Test Update: " + update.ToString()); } byte[] block_contents = ms.ToArray(); System.Console.WriteLine("Block Output: " + BitConverter.ToString(block_contents)); // init the decoder { ISegmentBlockDecoder dec = new SegmentBlockBasicDecoder(new BlockAccessor(ms.ToArray())); foreach (var kvp in dec.sortedWalk()) { System.Console.WriteLine("Payload Update: " + kvp.Value.ToString()); byte[] payload = kvp.Value.data; Assert.AreEqual(testdata, payload, "payload data mismatch!"); } } }
public void setValue(RecordKey key, RecordUpdate update) { if (this.is_frozen) { throw new Exception("snapshot not writable! " + this.frozen_at_snapshotnumber); } // add our snapshot_number to the end of the keyspace key.appendKeyPart(new RecordKeyType_AttributeTimestamp(this.current_snapshot)); // wrap the update into a sub-update, mostly because tombstones need to be "real" records // to us var sub_update = RecordUpdate.WithPayload(update.encode()); next_stage.setValue(key, sub_update); }
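Appending the snapshot number to every key is what lets an older snapshot keep reading old versions after newer writes land. A toy sketch of that idea with string keys, omitting the freeze check and the tombstone-preserving sub-update wrapping:

using System;
using System.Collections.Generic;
using System.Linq;

class SnapshotStore
{
    private readonly SortedDictionary<string, string> data =
        new SortedDictionary<string, string>(StringComparer.Ordinal);
    public long CurrentSnapshot { get; private set; }

    // every write lands under <key>@<snapshot number>
    public void Set(string key, string value) =>
        data[key + "@" + CurrentSnapshot.ToString("D10")] = value;

    public long TakeSnapshot() => CurrentSnapshot++;

    // a reader pinned at a snapshot sees the newest version written at or before it
    public string GetAsOf(string key, long snapshot) =>
        data.Where(kv => kv.Key.StartsWith(key + "@") &&
                         long.Parse(kv.Key.Substring(key.Length + 1)) <= snapshot)
            .Select(kv => kv.Value)
            .LastOrDefault(); // ordinal sort plus zero padding puts the newest last

    static void Main()
    {
        var store = new SnapshotStore();
        store.Set("a", "v0");
        long snap = store.TakeSnapshot();                 // pin a reader at snapshot 0
        store.Set("a", "v1");                             // lands at snapshot 1
        Console.WriteLine(store.GetAsOf("a", snap));                  // v0
        Console.WriteLine(store.GetAsOf("a", store.CurrentSnapshot)); // v1
    }
}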
public void T000_TestBasic_SnapshotScanAll() { // TODO: right now we have to make a subset stage, because otherwise // we see the .ROOT keyspace. Perhaps we should make prefixes // an automatic part of stage instantiation!?!? var snap_db = new StepsStageSnapshot( new StepsStageSubset( new RecordKeyType_String("snapdb"), new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\snap"))); string[] keys = new string[] { "1/2/3", "1/3/4", "1/5/3" }; foreach (var key in keys) { snap_db.setValue(new RecordKey().appendParsedKey(key), RecordUpdate.WithPayload("snap1 data:" + key)); } // TODO: check the data contents also to make sure we actually saw the right rows { int count = 0; foreach (var row in snap_db.scanForward(ScanRange<RecordKey>.All())) { var match_key = new RecordKey().appendParsedKey(keys[count]); Assert.True(match_key.CompareTo(row.Key) == 0, "scan key mismatch"); Console.WriteLine("scanned: " + row); count++; } Assert.AreEqual(keys.Length, count, "incorrect number of keys in stage1 scan"); } var snap1 = snap_db.getSnapshot(); foreach (var key in keys) { var newkey = new RecordKey().appendParsedKey(key).appendParsedKey("snap2"); snap_db.setValue(newkey, RecordUpdate.WithPayload("snap2 data:" + key)); } { int count = 0; foreach (var row in snap1.scanForward(ScanRange<RecordKey>.All())) { var match_key = new RecordKey().appendParsedKey(keys[count]); Assert.True(match_key.CompareTo(row.Key) == 0, "scan key mismatch"); Console.WriteLine("scanned: " + row); count++; } Assert.AreEqual(keys.Length, count, "incorrect number of keys in snap scan"); } }
internal IEnumerable<LogStatus> getStatusForLogs() { var seeds_prefix = new RecordKey() .appendParsedKey("_config/seeds"); var scanrange = new ScanRange<RecordKey>(seeds_prefix, RecordKey.AfterPrefix(seeds_prefix), null); yield return getStatusForLog(ctx.server_guid); // be sure to include myself foreach (var seed_row in next_stage.scanForward(scanrange)) { RecordKeyType last_keypart = seed_row.Key.key_parts[seed_row.Key.key_parts.Count - 1]; string server_guid = ((RecordKeyType_String)last_keypart).GetString(); if (server_guid.Equals(ctx.server_guid)) { continue; } // skip ourselves yield return getStatusForLog(server_guid); } }
internal IEnumerable<KeyValuePair<RecordKey, RecordData>> fetchLogEntries( string log_server_guid, RecordKeyType log_start_key, int limit = -1, bool block = false) { var rk_start = new RecordKey() .appendKeyPart("_logs") .appendKeyPart(log_server_guid); if (!log_start_key.Equals("")) { rk_start.appendKeyPart(log_start_key); } var rk_end = new RecordKey() .appendKeyPart("_logs") .appendKeyPart(log_server_guid); var scanrange = new ScanRange<RecordKey>(rk_start, RecordKey.AfterPrefix(rk_end), null); Console.WriteLine(" fetchLogEntries (block:{3}) for ({0}): start {1} end {2}", log_server_guid, rk_start, rk_end, block); bool matched_first = false; int count = 0; retry_log_fetch: foreach (var logrow in next_stage.scanForward(scanrange)) { if (!matched_first) { // the first logrow needs to match the log_start_key, or there was a gap in the log!! var logstamp = logrow.Key.key_parts[2]; if (logstamp.CompareTo(log_start_key) != 0) { throw new LogException( String.Format("log start gap! guid:{0} log_start_key:{1} logstamp:{2}", log_server_guid,log_start_key,logstamp)); } matched_first = true; continue; } yield return logrow; count++; // if we're limiting the number of return rows... if (limit != -1) { if (count > limit) { yield break; } } } if (!matched_first) { throw new LogException("no log entries!"); } // if we only matched one log row, then it should be the matching first row. if ((count == 0) && block) { Console.WriteLine("++++++++ block on log tail"); lock (this.logWaiters) { Monitor.Wait(this.logWaiters); } Console.WriteLine("++++++++ wakeup from log tail"); goto retry_log_fetch; } }
internal void _recordLogEntry(string from_server_guid, long logstamp, RecordUpdate logdata) { RecordKey logkey = new RecordKey() .appendKeyPart("_logs") .appendKeyPart(from_server_guid) .appendKeyPart(new RecordKeyType_Long(logstamp)); next_stage.setValue(logkey, logdata); pusher.wakeUpLogSleepers(); }
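wakeUpLogSleepers pairs with the Monitor.Wait in fetchLogEntries above: tail readers park on a shared monitor object, and each newly recorded log entry pulses it. Below is a minimal sketch of that pairing, assuming PulseAll semantics for the wakeup; note that the real reader re-scans for entries after waking (the retry_log_fetch loop) rather than trusting the wakeup alone.

using System;
using System.Threading;

static class LogTail
{
    static readonly object logWaiters = new object();

    static void WaitForNewEntries()
    {
        lock (logWaiters)
        {
            Console.WriteLine("++++++++ block on log tail");
            Monitor.Wait(logWaiters);       // releases the lock while parked
            Console.WriteLine("++++++++ wakeup from log tail");
        }
    }

    static void WakeUpLogSleepers()
    {
        lock (logWaiters)
        {
            Monitor.PulseAll(logWaiters);   // wake every parked reader
        }
    }

    static void Main()
    {
        var reader = new Thread(WaitForNewEntries);
        reader.Start();
        Thread.Sleep(100);                  // crude: let the reader park first
        WakeUpLogSleepers();                // simulates a new log entry arriving
        reader.Join();
    }
}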
public void doValidate() { int count = 0; while (true) { try { RecordKey key = new RecordKey().appendParsedKey(key_to_check); RecordData data; num_checks++; if (db.getRecord(key, out data) != GetStatus.PRESENT) { num_errors++; System.Console.WriteLine("!!!!! ValueCheckerThread ERROR: key(" + key_to_check + ") not present."); } else { if (!data.ReadDataAsString().Equals(value_to_expect)) { num_errors++; System.Console.WriteLine("!!!!! ValueCheckerThread ERROR: key(" + key_to_check + ") value mismatch, " + data.ReadDataAsString() + " != " + value_to_expect); } // check the contents } } catch (Exception e) { System.Console.WriteLine("!!!!! Exception in ValueCheckerThread {0}, {1}", key_to_check, e.ToString()); num_errors++; } if (should_end) { is_ended = true; return; } // sleep every Nth iteration if (count++ > 10) { count = 0; Thread.Sleep(0); } } }
private byte[] fetchLogEntries_block(string log_server_guid, string log_start_key, string log_end_key) { var rk_start = new RecordKey() .appendKeyPart("_logs") .appendKeyPart(log_server_guid) .appendKeyPart(log_start_key); var rk_end = RecordKey.AfterPrefix(new RecordKey() .appendKeyPart("_logs") .appendKeyPart(log_server_guid) .appendKeyPart(log_end_key)); var scanrange = new ScanRange<RecordKey>(rk_start, rk_end, null); byte[] packed_log_records; { MemoryStream writer = new MemoryStream(); // TODO: this seems like a really inefficient way to write out a key ISegmentBlockEncoder encoder = new SegmentBlockBasicEncoder(); encoder.setStream(writer); foreach (var logrow in next_stage.scanForward(scanrange)) { encoder.add(logrow.Key, RecordUpdate.WithPayload(logrow.Value.data)); } encoder.flush(); packed_log_records = writer.ToArray(); } // IF there are no log entries... BLOCK! return packed_log_records; }
public static void do_test(DbgGUI window, string[] args) { if (args.Length < 1) { Console.WriteLine("Usage:\n index - clear the db and index email\n search - perform search tests"); Environment.Exit(1); } if (args[0].CompareTo("index") == 0) { LayerManager db = new LayerManager(InitMode.NEW_REGION, DB_PATH ); db.startMaintThread(); PsudoEmailInjector injector = new PsudoEmailInjector(db, window); injector.parse_email_messages(); injector.indexer.find_email_test(); } else if (args[0].CompareTo("search") == 0) { LayerManager db = new LayerManager(InitMode.RESUME, DB_PATH); PsudoEmailInjector injector = new PsudoEmailInjector(db, window); window.debugDump(db); injector.indexer.find_email_test(); } else if (args[0].CompareTo("merge") == 0) { LayerManager db = new LayerManager(InitMode.RESUME, DB_PATH); window.debugDump(db); // merge... for (int x = 0; x < 30; x++) { var mc = db.rangemapmgr.mergeManager.getBestCandidate(); window.debugDump(db, mc); if (mc == null) { Console.WriteLine("no more merge candidates."); break; } db.performMerge(mc); window.debugDump(db); } } else if (args[0].CompareTo("test") == 0) { LayerManager db = new LayerManager(InitMode.RESUME, DB_PATH); window.debugDump(db); var key1 = new RecordKey() .appendParsedKey(@".zdata/index/which/c:\EmailTest\Data\Sent:5441/10"); var key2 = new RecordKey() .appendParsedKey(@".zdata/index/zzn/c:\EmailTest\Data\saved_mail_2003:4962/385"); var segkey = new RecordKey() .appendParsedKey(".ROOT/GEN") .appendKeyPart(new RecordKeyType_Long(1)) .appendKeyPart(key1) .appendKeyPart(key2); var nextrow = db.FindNext(segkey, false); Console.WriteLine("next: {0}", nextrow); var exactRow = db.FindNext(nextrow.Key, true); Console.WriteLine("refind: {0}", exactRow); } Console.WriteLine("done...."); Environment.Exit(0); }
internal void applyLogEntry(string from_server_guid, long logstamp, RecordUpdate logdata) { // (0) unpack the data BlockAccessor ba = new BlockAccessor(logdata.data); ISegmentBlockDecoder decoder = new SegmentBlockBasicDecoder(ba); // (1) add it to our copy of that server's log this._recordLogEntry(from_server_guid, logstamp, logdata); // (2) add it to the database foreach (var kvp in decoder.sortedWalk()) { RecordKey local_data_key = new RecordKey() .appendKeyPart("_data"); foreach (var part in kvp.Key.key_parts) { local_data_key.appendKeyPart(part); } next_stage.setValue(local_data_key, kvp.Value); } }
public void T04_SingleSegmentRootMetadataLogRecovery() { // TEST: test multiple segments flushed, and "log resumed" (walk .ROOT range map) // perform the previous test T03_SegmentLayerGetRecordApplicationOrder(); // ... and then perform a resume LayerManager db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\6"); String[] keys = { "test-1", "test-2", "test-3" }; String[] values = { "a-second", "b-second", "c-second" }; // verify that it has the same data as before the RESUME { // working segment should be empty for (int i = 0; i < keys.Length; i++) { RecordKey key = new RecordKey(); key.appendKeyPart(keys[i]); // look directly in the working segment, they should be MISSING // This is testing the checkpoint as well. If log resume didn't // CHECKPOINT_DROP, then the values will be duplicated in the working segment. { RecordUpdate update; GetStatus status = db.workingSegment.getRecordUpdate(key, out update); Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING"); } // assure the global query interface finds the NEW VALUES { RecordData data; GetStatus status = db.getRecord(key, out data); Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES"); Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES"); } } // now generate a BUNCH of new segments... { String[] secondkeys = { "second-test-1", "second-test-2", "second-test-3" }; String[] secondvalues = { "a-second", "b-second", "c-second" }; // put each new record in its OWN segment for (int i = 0; i < secondkeys.Length; i++) { LayerWriteGroup txn = db.newWriteGroup(); txn.setValueParsed(secondkeys[i], secondvalues[i]); txn.finish(); db.flushWorkingSegment(); } db.Dispose(); db.debugDump(); // RESUME db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\6"); // first test records should still be visible for (int i = 0; i < keys.Length; i++) { RecordKey key = new RecordKey(); key.appendKeyPart(keys[i]); // look directly in the working segment, they should be MISSING // This is testing the checkpoint as well. If log resume didn't // CHECKPOINT_DROP, then the values will be duplicated in the working segment. { RecordUpdate update; GetStatus status = db.workingSegment.getRecordUpdate(key, out update); Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING {0}", key); } // assure the global query interface finds the NEW VALUES { RecordData data; GetStatus status = db.getRecord(key, out data); Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES : {0}", key); Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES : {0}", key); } } db.debugDump(); // verify that the secondkeys/values are still in there for (int i = 0; i < secondkeys.Length; i++) { RecordKey key = new RecordKey(); key.appendKeyPart(secondkeys[i]); // look directly in the working segment, they should be MISSING // This is testing the checkpoint as well. If log resume didn't // CHECKPOINT_DROP, then the values will be duplicated in the working segment. 
{ RecordUpdate update; GetStatus status = db.workingSegment.getRecordUpdate(key, out update); Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING"); } // assure the global query interface finds the NEW VALUES { RecordData data; GetStatus status = db.getRecord(key, out data); Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES, where is: " + key); Assert.AreEqual(secondvalues[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES"); } } } db.Dispose(); } }
public void scanSeeds() { // Console.WriteLine("** seed scan {0}", myhandler.ctx.server_guid); var seed_key_prefix = new RecordKey() .appendKeyPart("_config") .appendKeyPart("seeds"); // scan our config list of seeds foreach (var row in myhandler.next_stage.scanForward( new ScanRange<RecordKey>(seed_key_prefix, RecordKey.AfterPrefix(seed_key_prefix), null))) { string sname = ((RecordKeyType_String)row.Key.key_parts[row.Key.key_parts.Count - 1]).GetString(); if (sname == myhandler.ctx.server_guid) { continue; // ignore our own guid!! } // Console.WriteLine("** seed scan {0} row: {1}", myhandler.ctx.server_guid, row); if (!servers.Contains(sname)) try { IReplConnection srvr = myhandler.ctx.connector.getServerHandle(sname); // only add this as a seed if it's active! if (srvr.getState() == ReplState.active) { this.addServer(sname); Console.WriteLine("** scan seed, server {0} pusher, added seed {1}", myhandler.ctx.server_guid, sname); } } catch (KeyNotFoundException) { // server handle not found by connector } } }
public LogStatus getStatusForLog(string server_guid) { var log_status = new LogStatus(); log_status.server_guid = server_guid; var log_prefix_key = new RecordKey() .appendKeyPart("_logs") .appendKeyPart(server_guid); // first log entry for this log try { var oldestlogrow = next_stage.FindNext(log_prefix_key, false); if (oldestlogrow.Key.isSubkeyOf(log_prefix_key)) { log_status.oldest_entry_pointer = ((RecordKeyType_Long)oldestlogrow.Key.key_parts[oldestlogrow.Key.key_parts.Count - 1]); } else { log_status.oldest_entry_pointer = new RecordKeyType_Long(0); } } catch (KeyNotFoundException) { log_status.oldest_entry_pointer = new RecordKeyType_Long(0); } // newest log entry for this log try { var newestlogrow = next_stage.FindPrev(RecordKey.AfterPrefix(log_prefix_key), false); if (newestlogrow.Key.isSubkeyOf(log_prefix_key)) { log_status.log_commit_head = ((RecordKeyType_Long)newestlogrow.Key.key_parts[newestlogrow.Key.key_parts.Count - 1]); } else { log_status.log_commit_head = new RecordKeyType_Long(0); } } catch (KeyNotFoundException) { log_status.log_commit_head = new RecordKeyType_Long(0); } // Console.WriteLine("_statusForLog returning: " + log_status); return log_status; }
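Both lookups above use the same bracketing idiom: the oldest entry under a prefix is the first key at or after the prefix, and the newest is the last key before RecordKey.AfterPrefix(prefix), the smallest key that sorts above everything sharing the prefix. A string-based sketch of that idiom via SortedSet.GetViewBetween; AfterPrefix here is a hypothetical stand-in that works for these ASCII keys.

using System;
using System.Collections.Generic;

static class PrefixScan
{
    // hypothetical stand-in for RecordKey.AfterPrefix
    static string AfterPrefix(string prefix) => prefix + '\uffff';

    static void Main()
    {
        var keys = new SortedSet<string>(StringComparer.Ordinal)
        {
            "_logs/serverA/0001", "_logs/serverA/0042", "_logs/serverB/0007"
        };
        string prefix = "_logs/serverA/";
        var view = keys.GetViewBetween(prefix, AfterPrefix(prefix));
        Console.WriteLine(view.Min); // oldest: _logs/serverA/0001
        Console.WriteLine(view.Max); // newest: _logs/serverA/0042
    }
}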
internal ReadThreadsTest(int rec_count, int rec_per_segment) { this.TEST_RECORD_COUNT = rec_count; this.RECORDS_PER_SEGMENT = rec_per_segment; System.GC.Collect(); db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\10"); testdata = new SortedDictionary<string, string>(); testrows = new SortedDictionary<RecordKey, RecordUpdate>(); // generate some data for (int i=0;i<TEST_RECORD_COUNT;i++) { string key = "test/" + i.ToString(); string value = "data: " + key; testdata[key] = value; RecordKey rkey = new RecordKey().appendParsedKey(key); RecordUpdate rupdate = RecordUpdate.WithPayload(value); testrows[rkey] = rupdate; } // fill the db with some data. int pos = 0; foreach (KeyValuePair<RecordKey,RecordUpdate> kvp in testrows) { LayerWriteGroup txn = db.newWriteGroup(); txn.setValue(kvp.Key, kvp.Value); txn.finish(); pos++; if ((pos % RECORDS_PER_SEGMENT) == 0) { db.flushWorkingSegment(); } } db.flushWorkingSegment(); }