Example No. 1
        private async Task Restore()
        {
            TransactionalStorageLoadResponse <TState> loadresponse = await storage.Load();

            this.storageBatch = new StorageBatch <TState>(loadresponse);

            this.stableState          = loadresponse.CommittedState;
            this.stableSequenceNumber = loadresponse.CommittedSequenceId;

            if (logger.IsEnabled(LogLevel.Debug))
            {
                logger.Debug($"Load v{this.stableSequenceNumber} {loadresponse.PendingStates.Count}p {storageBatch.MetaData.CommitRecords.Count}c");
            }

            // ensure clock is consistent with loaded state
            this.Clock.Merge(storageBatch.MetaData.TimeStamp);

            // resume prepared transactions (not TM)
            foreach (var pr in loadresponse.PendingStates.OrderBy(ps => ps.TimeStamp))
            {
                if (pr.SequenceId > loadresponse.CommittedSequenceId && pr.TransactionManager.Reference != null)
                {
                    if (logger.IsEnabled(LogLevel.Debug))
                    {
                        logger.Debug($"recover two-phase-commit {pr.TransactionId}");
                    }

                    ParticipantId tm = pr.TransactionManager;

                    commitQueue.Add(new TransactionRecord <TState>()
                    {
                        Role                        = CommitRole.RemoteCommit,
                        TransactionId               = Guid.Parse(pr.TransactionId),
                        Timestamp                   = pr.TimeStamp,
                        State                       = pr.State,
                        SequenceNumber              = pr.SequenceId,
                        TransactionManager          = tm,
                        PrepareIsPersisted          = true,
                        LastSent                    = default(DateTime),
                        ConfirmationResponsePromise = null,
                        NumberWrites                = 1 // was a writing transaction
                    });
                    this.stableSequenceNumber = pr.SequenceId;
                }
            }

            // resume committed transactions (on TM)
            foreach (var kvp in storageBatch.MetaData.CommitRecords)
            {
                if (logger.IsEnabled(LogLevel.Debug))
                {
                    logger.Debug($"recover commit confirmation {kvp.Key}");
                }
                this.confirmationWorker.Add(kvp.Key, kvp.Value.Timestamp, kvp.Value.WriteParticipants);
            }

            // check for work
            this.storageWorker.Notify();
            this.RWLock.Notify();
        }
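
The Clock.Merge call above keeps the transaction clock consistent with the loaded state: after a restore, locally issued timestamps must not fall behind the largest timestamp already persisted with the state. Below is a minimal sketch of that invariant; MergeableClock is a simplified stand-in for illustration only, not Orleans' actual clock type.

using System;

public sealed class MergeableClock
{
    private DateTime latest = DateTime.MinValue;

    // Called with the persisted metadata timestamp after a load.
    public void Merge(DateTime persisted)
    {
        if (persisted > latest)
        {
            latest = persisted;
        }
    }

    // Issues a monotonically increasing timestamp, never at or below one already seen.
    public DateTime NextTimestamp()
    {
        DateTime now = DateTime.UtcNow;
        latest = now > latest ? now : latest.AddTicks(1);
        return latest;
    }
}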
Example No. 2
        // attempt to clear transaction from commit log
        private async Task <bool> TryCollect(Guid transactionId)
        {
            try
            {
                var storeComplete = new TaskCompletionSource <bool>();
                // Now we can remove the commit record.
                StorageBatch <TState> storageBatch = getStorageBatch();
                storageBatch.Collect(transactionId);
                storageBatch.FollowUpAction(() =>
                {
                    if (this.logger.IsEnabled(LogLevel.Trace))
                    {
                        this.logger.LogTrace("Collection completed. TransactionId:{TransactionId}", transactionId);
                    }
                    this.pending.Remove(transactionId);
                    storeComplete.TrySetResult(true);
                });

                storageWorker.Notify();

                // wait for storage call, so we don't free spin
                await Task.WhenAll(storeComplete.Task, Task.Delay(this.options.ConfirmationRetryDelay));

                if (storeComplete.Task.IsCompleted)
                {
                    return(storeComplete.Task.Result);
                }
            }
            catch (Exception ex)
            {
                this.logger.LogWarning($"Error occured while cleaning up transaction {transactionId} from commit log.  Will retry.", ex);
            }
            return(false);
        }
Example No. 3
        // attempt to clear transaction from commit log
        private async Task <bool> TryCollect(Guid transactionId)
        {
            try
            {
                var storeComplete = new TaskCompletionSource <bool>(TaskCreationOptions.RunContinuationsAsynchronously);
                // Now we can remove the commit record.
                StorageBatch <TState> storageBatch = getStorageBatch();
                storageBatch.Collect(transactionId);
                storageBatch.FollowUpAction(() =>
                {
                    if (this.logger.IsEnabled(LogLevel.Trace))
                    {
                        this.logger.LogTrace("Collection completed. TransactionId:{TransactionId}", transactionId);
                    }
                    this.pending.Remove(transactionId);
                    storeComplete.TrySetResult(true);
                });

                storageWorker.Notify();

                // wait for storage call, so we don't free spin
                return(await storeComplete.Task);
            }
            catch (Exception ex)
            {
                this.logger.LogWarning(ex, "Error occurred while cleaning up transaction {TransactionId} from commit log.  Will retry.", transactionId);
            }

            return(false);
        }
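
Examples No. 2 and No. 3 differ in how they wait for the storage worker: example No. 2 always waits at least ConfirmationRetryDelay via Task.WhenAll, while example No. 3 creates the TaskCompletionSource with TaskCreationOptions.RunContinuationsAsynchronously and awaits it directly, so the await does not resume inline on the thread that calls TrySetResult inside the follow-up action. A minimal, self-contained sketch of that flag's effect; the names here are illustrative only.

using System;
using System.Threading;
using System.Threading.Tasks;

class TcsSketch
{
    static async Task Main()
    {
        // Without RunContinuationsAsynchronously, the await below could resume
        // synchronously on the worker thread that calls TrySetResult.
        var storeComplete = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);

        // Stand-in for the follow-up action the storage worker runs after persisting.
        _ = Task.Run(() =>
        {
            Console.WriteLine($"completed on thread {Thread.CurrentThread.ManagedThreadId}");
            storeComplete.TrySetResult(true);
        });

        bool stored = await storeComplete.Task;
        Console.WriteLine($"resumed on thread {Thread.CurrentThread.ManagedThreadId}, result={stored}");
    }
}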
        protected override void OnPushStorageBatch(StorageBatch storageBatch)
        {
            if (!(storageBatch.ExtraData is CirrusStorageBatch cirrusStorageBatch))
            {
                throw new ArgumentNullException(nameof(cirrusStorageBatch));
            }

            var t1 = Task.Run(() =>
            {
                if (cirrusStorageBatch.CirrusContractTable.Any())
                {
                    cirrusdDb.CirrusContractTable.InsertMany(cirrusStorageBatch.CirrusContractTable, new InsertManyOptions {
                        IsOrdered = false
                    });
                }
            });

            var t2 = Task.Run(() =>
            {
                if (cirrusStorageBatch.CirrusContractCodeTable.Any())
                {
                    cirrusdDb.CirrusContractCodeTable.InsertMany(cirrusStorageBatch.CirrusContractCodeTable, new InsertManyOptions {
                        IsOrdered = false
                    });
                }
            });


            Task.WaitAll(t1, t2);
        }
Example No. 5
        public override async Task OnExecute()
        {
            IBlockchainClient client = clientFactory.Create(connection);

            List <string> allIndexes = mongoData.GetBlockIndexIndexes();

            if (allIndexes.Count == BlockIndexer.ExpectedNumberOfIndexes)
            {
                Runner.GlobalState.IndexModeCompleted = true;
            }

            Runner.GlobalState.PullingTip = null;
            Runner.GlobalState.StoreTip   = null;

            Runner.GlobalState.StoreTip = await syncOperations.RewindToLastCompletedBlockAsync();

            if (Runner.GlobalState.StoreTip == null)
            {
                // No blocks in store; start from zero and
                // push the genesis block to the store
                int    start       = 0;
                string genesisHash = await client.GetblockHashAsync(start);


                log.LogInformation($"Processing genesis hash = {genesisHash}");

                BlockInfo genesisBlock = await client.GetBlockAsync(genesisHash);

                SyncBlockTransactionsOperation block = syncOperations.FetchFullBlock(connection, genesisBlock);

                StorageBatch genesisBatch = new StorageBatch();
                storageOperations.AddToStorageBatch(genesisBatch, block);
                Runner.GlobalState.StoreTip = storageOperations.PushStorageBatch(genesisBatch);
            }

            BlockInfo fetchedBlock = await client.GetBlockAsync(Runner.GlobalState.StoreTip.BlockHash);

            if (fetchedBlock == null)
            {
                // check that the full node is not behind the indexer height
                int fullnodeTipHeight = client.GetBlockCount();
                if (fullnodeTipHeight < Runner.GlobalState.StoreTip.BlockIndex)
                {
                    throw new ApplicationException($"Full node at height {fullnodeTipHeight} which is behind the Indexer at height {Runner.GlobalState.StoreTip.BlockIndex}");
                }

                // a reorg happened while the indexer was offline; rewind the indexer database
                Runner.GlobalState.PullingTip = null;
                Runner.GlobalState.StoreTip   = null;

                Runner.GlobalState.StoreTip = await syncOperations.RewindToBestChain(connection);
            }

            // update the chain's tip
            Runner.GlobalState.ChainTipHeight = syncOperations.GetBlockCount(client);
        }
Example No. 6
 public IStorageBatch Batch()
 {
     sync.EnterWriteLock();
     {
         var newBatch = new StorageBatch(repository, statistics, delegate
         {
             sync.ExitWriteLock();
         });
         newBatch.Prepare();
         return newBatch;
     }
 }
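
The Batch() method above acquires the write lock before building the batch and hands the release delegate to the StorageBatch, so the lock stays held until the batch's owner invokes that callback. A minimal sketch of the same hand-off pattern, using hypothetical types rather than the repository's own:

using System;
using System.Threading;

public sealed class BatchingStore
{
    private readonly ReaderWriterLockSlim sync = new ReaderWriterLockSlim();

    public IDisposable BeginBatch()
    {
        // Lock is taken up front; the release callback travels with the handle.
        sync.EnterWriteLock();
        return new Releaser(() => sync.ExitWriteLock());
    }

    private sealed class Releaser : IDisposable
    {
        private readonly Action release;
        public Releaser(Action release) => this.release = release;
        public void Dispose() => release();
    }
}

One consequence of this design is that a batch whose completion path never runs keeps the write lock held, so callers must guarantee the release callback is always invoked.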
        public void AddToStorageBatch(StorageBatch storageBatch, SyncBlockTransactionsOperation item)
        {
            storageBatch.TotalSize += item.BlockInfo.Size;
            storageBatch.BlockTable.Add(item.BlockInfo.Height, mongoBlockToStorageBlock.Map(item.BlockInfo));

            int transactionIndex = 0;

            foreach (Transaction trx in item.Transactions)
            {
                string trxHash = trx.GetHash().ToString();

                storageBatch.TransactionBlockTable.Add(
                    new TransactionBlockTable
                {
                    BlockIndex       = item.BlockInfo.HeightAsUint32,
                    TransactionId    = trxHash,
                    TransactionIndex = transactionIndex++,
                });

                if (configuration.StoreRawTransactions)
                {
                    storageBatch.TransactionTable.Add(new TransactionTable
                    {
                        TransactionId  = trxHash,
                        RawTransaction = trx.ToBytes(syncConnection.Network.Consensus.ConsensusFactory)
                    });
                }

                int outputIndex = 0;
                foreach (TxOut output in trx.Outputs)
                {
                    ScriptOutputInfo res  = scriptInterpeter.InterpretScript(syncConnection.Network, output.ScriptPubKey);
                    string           addr = res != null
                  ? (res?.Addresses != null && res.Addresses.Any()) ? res.Addresses.First() : res.ScriptType
                  : "none";

                    var outpoint = new Outpoint {
                        TransactionId = trxHash, OutputIndex = outputIndex++
                    };

                    storageBatch.OutputTable.Add(outpoint.ToString(), new OutputTable
                    {
                        Address    = addr,
                        Outpoint   = outpoint,
                        BlockIndex = item.BlockInfo.HeightAsUint32,
                        Value      = output.Value,
                        ScriptHex  = output.ScriptPubKey.ToHex(),
                        CoinBase   = trx.IsCoinBase,
                        CoinStake  = syncConnection.Network.Consensus.IsProofOfStake && trx.IsCoinStake,
                    });
                }

                if (trx.IsCoinBase)
                {
                    continue; // no need to check the inputs of a coinbase transaction
                }
                foreach (TxIn input in trx.Inputs)
                {
                    var outpoint = new Outpoint
                    {
                        TransactionId = input.PrevOut.Hash.ToString(), OutputIndex = (int)input.PrevOut.N
                    };

                    storageBatch.OutputTable.TryGetValue(outpoint.ToString(), out OutputTable output);

                    storageBatch.InputTable.Add(new InputTable()
                    {
                        Outpoint   = outpoint,
                        TrxHash    = trxHash,
                        BlockIndex = item.BlockInfo.HeightAsUint32,
                        Address    = output?.Address,
                        Value      = output?.Value ?? 0,
                    });
                }
            }

            // allow any extensions to add to the batch.
            OnAddToStorageBatch(storageBatch, item);
        }
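
Because OutputTable is keyed by the outpoint string, an input that spends an output created earlier in the same batch can be enriched with that output's address and value without a round trip to the store (the TryGetValue call above). A minimal sketch of that in-batch join, with simplified stand-in types:

using System;
using System.Collections.Generic;

class OutpointJoinSketch
{
    // Simplified stand-in for OutputTable rows.
    sealed class OutputRow
    {
        public string Address;
        public long Value;
    }

    static void Main()
    {
        var outputs = new Dictionary<string, OutputRow>();

        // Produced while walking a transaction's outputs ("txid-index" key).
        outputs["abc123-0"] = new OutputRow { Address = "addr1", Value = 5000 };

        // Later, while walking another transaction's inputs in the same batch.
        if (outputs.TryGetValue("abc123-0", out OutputRow spent))
        {
            Console.WriteLine($"{spent.Address} {spent.Value}");
        }
    }
}

Inputs that spend outputs from earlier batches are resolved later, in PushStorageBatch, via the FetchUtxos lookup.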
 protected virtual void OnPushStorageBatch(StorageBatch storageBatch)
 {
 }
 protected virtual void OnAddToStorageBatch(StorageBatch storageBatch, SyncBlockTransactionsOperation item)
 {
 }
        public SyncBlockInfo PushStorageBatch(StorageBatch storageBatch)
        {
            if (globalState.IndexModeCompleted)
            {
                if (globalState.IbdMode() == false)
                {
                    if (globalState.LocalMempoolView.Any())
                    {
                        var toRemoveFromMempool = storageBatch.TransactionBlockTable.Select(s => s.TransactionId).ToList();

                        FilterDefinitionBuilder <MempoolTable> builder = Builders <MempoolTable> .Filter;
                        FilterDefinition <MempoolTable>        filter  = builder.In(mempoolItem => mempoolItem.TransactionId,
                                                                                    toRemoveFromMempool);

                        db.Mempool.DeleteMany(filter);

                        foreach (string mempooltrx in toRemoveFromMempool)
                        {
                            globalState.LocalMempoolView.Remove(mempooltrx, out _);
                        }
                    }
                }
            }

            var blockTableTask = storageBatch.BlockTable.Values.Any()
            ? db.BlockTable.InsertManyAsync(storageBatch.BlockTable.Values, new InsertManyOptions {
                IsOrdered = false
            })
            : Task.CompletedTask;

            var transactionBlockTableTask = storageBatch.TransactionBlockTable.Any()
            ? db.TransactionBlockTable.InsertManyAsync(storageBatch.TransactionBlockTable, new InsertManyOptions {
                IsOrdered = false
            })
            : Task.CompletedTask;

            var outputTableTask = storageBatch.OutputTable.Any()
            ? db.OutputTable.InsertManyAsync(storageBatch.OutputTable.Values, new InsertManyOptions {
                IsOrdered = false
            })
            : Task.CompletedTask;

            Task transactionTableTask = Task.Run(() =>
            {
                try
                {
                    if (storageBatch.TransactionTable.Any())
                    {
                        db.TransactionTable.InsertMany(storageBatch.TransactionTable, new InsertManyOptions {
                            IsOrdered = false
                        });
                    }
                }
                catch (MongoBulkWriteException mbwex)
                {
                    // transactions are a special case: they are not deleted from the store on reorgs
                    // because they will simply be included in other blocks, so we ignore duplicate-key errors
                    if (mbwex.WriteErrors.Any(e => e.Category != ServerErrorCategory.DuplicateKey))
                    {
                        throw;
                    }
                }
            });

            var utxos = new List <UnspentOutputTable>(storageBatch.OutputTable.Values.Count);

            foreach (OutputTable outputTable in storageBatch.OutputTable.Values)
            {
                if (outputTable.Address.Equals(OpReturnAddress))
                {
                    continue;
                }

                // TODO: filter out outputs that are already spent in the storageBatch.InputTable
                // such outputs will get deleted anyway in the next UnspentOutputTable.DeleteMany operation;
                // this means we should probably make the storageBatch.InputTable a dictionary as well.

                utxos.Add(new UnspentOutputTable
                {
                    Address    = outputTable.Address,
                    Outpoint   = outputTable.Outpoint,
                    Value      = outputTable.Value,
                    BlockIndex = outputTable.BlockIndex
                });
            }

            var unspentOutputTableTask = utxos.Any()
            ? db.UnspentOutputTable.InsertManyAsync(utxos, new InsertManyOptions {
                IsOrdered = false
            })
            : Task.CompletedTask;

            var inputTableTask = Task.CompletedTask;

            if (storageBatch.InputTable.Any())
            {
                var utxosLookups = FetchUtxos(
                    storageBatch.InputTable
                    .Where(_ => _.Address == null)
                    .Select(_ => _.Outpoint));

                foreach (InputTable input in storageBatch.InputTable)
                {
                    if (input.Address != null)
                    {
                        continue;
                    }

                    string key = input.Outpoint.ToString();
                    input.Address = utxosLookups[key].Address;
                    input.Value   = utxosLookups[key].Value;
                }

                inputTableTask = db.InputTable.InsertManyAsync(storageBatch.InputTable, new InsertManyOptions {
                    IsOrdered = false
                });
            }

            Task.WaitAll(blockTableTask, transactionBlockTableTask, outputTableTask, inputTableTask, transactionTableTask, unspentOutputTableTask);

            if (storageBatch.InputTable.Any())
            {
                // TODO: if earlier we filtered out outputs that are already spent and did not push them to the utxo table,
                // we would not need to try to delete such outputs here because they were never pushed to the store.

                var outpointsFromNewInput = storageBatch.InputTable
                                            .Select(_ => _.Outpoint)
                                            .ToList();

                var filterToDelete = Builders <UnspentOutputTable> .Filter
                                     .Where(_ => outpointsFromNewInput.Contains(_.Outpoint));

                var deleteResult = db.UnspentOutputTable.DeleteMany(filterToDelete);

                if (deleteResult.DeletedCount != outpointsFromNewInput.Count)
                {
                    throw new ApplicationException($"Delete of unspent outputs did not complete successfully : {deleteResult.DeletedCount} deleted but {outpointsFromNewInput.Count} expected");
                }
            }

            // allow any extensions to push to repo before we complete the block.
            OnPushStorageBatch(storageBatch);

            string lastBlockHash        = null;
            long   blockIndex           = 0;
            var    markBlocksAsComplete = new List <UpdateOneModel <BlockTable> >();

            foreach (BlockTable mapBlock in storageBatch.BlockTable.Values.OrderBy(b => b.BlockIndex))
            {
                FilterDefinition <BlockTable> filter =
                    Builders <BlockTable> .Filter.Eq(block => block.BlockIndex, mapBlock.BlockIndex);

                UpdateDefinition <BlockTable> update =
                    Builders <BlockTable> .Update.Set(blockInfo => blockInfo.SyncComplete, true);

                markBlocksAsComplete.Add(new UpdateOneModel <BlockTable>(filter, update));
                lastBlockHash = mapBlock.BlockHash;
                blockIndex    = mapBlock.BlockIndex;
            }

            // mark each block as complete
            db.BlockTable.BulkWrite(markBlocksAsComplete, new BulkWriteOptions()
            {
                IsOrdered = true
            });

            SyncBlockInfo block = storage.BlockByIndex(blockIndex);

            if (block.BlockHash != lastBlockHash)
            {
                throw new ArgumentException($"Expected hash {lastBlockHash} for block {blockIndex} but was {block.BlockHash}");
            }

            return(block);
        }
Example No. 11
        private async Task StorageWork()
        {
            try
            {
                if (problemFlag != TransactionalStatus.Ok)
                {
                    RWLock.AbortExecutingTransactions();

                    // abort all entries in the commit queue
                    foreach (var entry in commitQueue.Elements)
                    {
                        NotifyOfAbort(entry, problemFlag);
                    }
                    commitQueue.Clear();

                    if (problemFlag == TransactionalStatus.StorageConflict)
                    {
                        logger.Debug("deactivating after storage conflict");
                        this.deactivate();
                        this.RWLock.AbortQueuedTransactions();
                    }
                    else
                    {
                        logger.Debug($"restoring state after status={problemFlag}");
                        // recover, clear storageFlag, then allow next queued transaction(s) to enter lock
                        await NotifyOfRestore();
                    }
                }
                else
                {
                    // count committable entries at the bottom of the commit queue
                    int committableEntries = 0;
                    while (committableEntries < commitQueue.Count && commitQueue[committableEntries].ReadyToCommit)
                    {
                        committableEntries++;
                    }

                    // process all committable entries, assembling a storage batch
                    if (committableEntries > 0)
                    {
                        // process all committable entries, adding storage events to the storage batch
                        CollectEventsForBatch(committableEntries);
                        if (problemFlag != TransactionalStatus.Ok)
                        {
                            return;
                        }

                        if (logger.IsEnabled(LogLevel.Debug))
                        {
                            var r = commitQueue.Count > committableEntries ? commitQueue[committableEntries].ToString() : "";
                            logger.Debug($"batchcommit={committableEntries} leave={commitQueue.Count - committableEntries} {r}");
                        }
                    }
                    else
                    {
                        // send or re-send messages and detect timeouts
                        CheckProgressOfCommitQueue();
                    }

                    // store the current storage batch, if it is not empty
                    StorageBatch <TState> batchBeingSentToStorage = null;
                    if (this.storageBatch.BatchSize > 0)
                    {
                        // get the next batch in place so it can be filled while we store the old one
                        batchBeingSentToStorage = this.storageBatch;
                        this.storageBatch       = new StorageBatch <TState>(batchBeingSentToStorage);

                        // perform the actual store, and record the e-tag
                        this.storageBatch.ETag = await batchBeingSentToStorage.Store(storage);
                    }

                    if (committableEntries > 0)
                    {
                        // update stable state
                        var lastCommittedEntry = commitQueue[committableEntries - 1];
                        this.stableState          = lastCommittedEntry.State;
                        this.stableSequenceNumber = lastCommittedEntry.SequenceNumber;
                        if (logger.IsEnabled(LogLevel.Trace))
                        {
                            logger.Trace($"Stable state version: {this.stableSequenceNumber}");
                        }


                        // remove committed entries from commit queue
                        commitQueue.RemoveFromFront(committableEntries);
                        storageWorker.Notify();  // we have to re-check for work
                    }

                    if (batchBeingSentToStorage != null)
                    {
                        batchBeingSentToStorage.RunFollowUpActions();
                        storageWorker.Notify();  // we have to re-check for work
                    }
                }
            }
            catch (InconsistentStateException e)
            {
                logger.Warn(888, $"reload from storage triggered by e-tag mismatch {e}");

                problemFlag = TransactionalStatus.StorageConflict;
            }
            catch (Exception e)
            {
                logger.Warn(888, $"exception in storageWorker", e);

                problemFlag = TransactionalStatus.UnknownException;
            }
            finally
            {
                if (problemFlag == TransactionalStatus.Ok)
                {
                    this.failCounter = 0;
                }
                else
                {
                    // after exceptions, we try again, but with limits
                    if (++failCounter < 10)
                    {
                        await Task.Delay(100);

                        // this restarts the worker, which sees the problem flag and recovers.
                        storageWorker.Notify();
                    }
                    else
                    {
                        // bail out
                        logger.Warn(999, "storageWorker is bailing out");
                    }
                }
            }
        }
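
The key move in StorageWork() is the swap: batchBeingSentToStorage takes the current storageBatch and a fresh StorageBatch immediately replaces it, so new events keep accumulating while the old batch is being persisted. A minimal sketch of that swap-then-store pattern with simplified types; like the grain code above, it relies on the caller being single-threaded rather than on locks.

using System;
using System.Collections.Generic;
using System.Threading.Tasks;

class BatchSwapSketch
{
    private List<string> currentBatch = new List<string>();

    public void Append(string storageEvent) => currentBatch.Add(storageEvent);

    public async Task FlushAsync(Func<List<string>, Task> store)
    {
        if (currentBatch.Count == 0)
        {
            return;
        }

        // Swap first: appends made during the store land in the new batch.
        List<string> batchBeingSent = currentBatch;
        currentBatch = new List<string>();

        // Then persist the old batch.
        await store(batchBeingSent);
    }
}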
Example No. 12
        private async Task StorageWork()
        {
            try
            {
                // count committable entries at the bottom of the commit queue
                int committableEntries = 0;
                while (committableEntries < commitQueue.Count && commitQueue[committableEntries].ReadyToCommit)
                {
                    committableEntries++;
                }

                // process all committable entries, assembling a storage batch
                if (committableEntries > 0)
                {
                    // process all committable entries, adding storage events to the storage batch
                    CollectEventsForBatch(committableEntries);

                    if (logger.IsEnabled(LogLevel.Debug))
                    {
                        var r = commitQueue.Count > committableEntries ? commitQueue[committableEntries].ToString() : "";
                        logger.Debug($"batchcommit={committableEntries} leave={commitQueue.Count - committableEntries} {r}");
                    }
                }
                else
                {
                    // send or re-send messages and detect timeouts
                    await CheckProgressOfCommitQueue();
                }

                // store the current storage batch, if it is not empty
                StorageBatch <TState> batchBeingSentToStorage = null;
                if (this.storageBatch.BatchSize > 0)
                {
                    // get the next batch in place so it can be filled while we store the old one
                    batchBeingSentToStorage = this.storageBatch;
                    this.storageBatch       = new StorageBatch <TState>(batchBeingSentToStorage);

                    try
                    {
                        if (await batchBeingSentToStorage.CheckStorePreConditions())
                        {
                            // perform the actual store, and record the e-tag
                            this.storageBatch.ETag = await batchBeingSentToStorage.Store(storage);

                            failCounter = 0;
                        }
                        else
                        {
                            logger.LogWarning("Store pre conditions not met.");
                            await AbortAndRestore(TransactionalStatus.CommitFailure);

                            return;
                        }
                    }
                    catch (InconsistentStateException e)
                    {
                        logger.LogWarning(888, e, "Reload from storage triggered by e-tag mismatch.");
                        await AbortAndRestore(TransactionalStatus.StorageConflict, true);

                        return;
                    }
                    catch (Exception e)
                    {
                        logger.Warn(888, $"Storage exception in storageWorker.", e);
                        await AbortAndRestore(TransactionalStatus.UnknownException);

                        return;
                    }
                }

                if (committableEntries > 0)
                {
                    // update stable state
                    var lastCommittedEntry = commitQueue[committableEntries - 1];
                    this.stableState          = lastCommittedEntry.State;
                    this.stableSequenceNumber = lastCommittedEntry.SequenceNumber;
                    if (logger.IsEnabled(LogLevel.Trace))
                    {
                        logger.Trace($"Stable state version: {this.stableSequenceNumber}");
                    }

                    // remove committed entries from commit queue
                    commitQueue.RemoveFromFront(committableEntries);
                    storageWorker.Notify();  // we have to re-check for work
                }

                if (batchBeingSentToStorage != null)
                {
                    batchBeingSentToStorage.RunFollowUpActions();
                    storageWorker.Notify();  // we have to re-check for work
                }
            }
            catch (Exception e)
            {
                logger.LogWarning(888, e, "Exception in storageWorker.  Retry {FailCounter}", failCounter);
                await AbortAndRestore(TransactionalStatus.UnknownException);
            }
        }
Example No. 13
        /// <summary>
        /// called on activation, and when recovering from storage conflicts or other exceptions.
        /// </summary>
        public async Task NotifyOfRestore()
        {
            var loadresponse = await storage.Load();

            this.storageBatch = new StorageBatch <TState>(loadresponse, this.serializerSettings);

            this.stableState          = loadresponse.CommittedState;
            this.stableSequenceNumber = loadresponse.CommittedSequenceId;

            if (logger.IsEnabled(LogLevel.Debug))
            {
                logger.Debug($"Load v{this.stableSequenceNumber} {loadresponse.PendingStates.Count}p {storageBatch.MetaData.CommitRecords.Count}c");
            }

            // ensure clock is consistent with loaded state
            this.Clock.Merge(storageBatch.MetaData.TimeStamp);

            // resume prepared transactions (not TM)
            foreach (var pr in loadresponse.PendingStates.OrderBy(ps => ps.TimeStamp))
            {
                if (pr.SequenceId > this.stableSequenceNumber && pr.TransactionManager != null)
                {
                    if (logger.IsEnabled(LogLevel.Debug))
                    {
                        logger.Debug($"recover two-phase-commit {pr.TransactionId}");
                    }
                    var tm = (pr.TransactionManager == null) ? null :
                             JsonConvert.DeserializeObject <ITransactionParticipant>(pr.TransactionManager, this.serializerSettings);

                    commitQueue.Add(new TransactionRecord <TState>()
                    {
                        Role                        = CommitRole.RemoteCommit,
                        TransactionId               = Guid.Parse(pr.TransactionId),
                        Timestamp                   = pr.TimeStamp,
                        State                       = pr.State,
                        TransactionManager          = tm,
                        PrepareIsPersisted          = true,
                        LastSent                    = default(DateTime),
                        ConfirmationResponsePromise = null
                    });
                }
            }

            // resume committed transactions (on TM)
            foreach (var kvp in storageBatch.MetaData.CommitRecords)
            {
                if (logger.IsEnabled(LogLevel.Debug))
                {
                    logger.Debug($"recover commit confirmation {kvp.Key}");
                }

                confirmationTasks.Add(kvp.Key, new TransactionRecord <TState>()
                {
                    Role              = CommitRole.LocalCommit,
                    TransactionId     = kvp.Key,
                    Timestamp         = kvp.Value.Timestamp,
                    WriteParticipants = kvp.Value.WriteParticipants
                });
            }

            // clear the problem flag
            problemFlag = TransactionalStatus.Ok;

            // check for work
            this.confirmationWorker.Notify();
            this.storageWorker.Notify();
            this.RWLock.Notify();
        }
        protected override void OnAddToStorageBatch(StorageBatch storageBatch, SyncBlockTransactionsOperation item)
        {
            storageBatch.ExtraData ??= new CirrusStorageBatch();

            if (!(storageBatch.ExtraData is CirrusStorageBatch cirrusStorageBatch))
            {
                throw new ArgumentNullException(nameof(cirrusStorageBatch));
            }

            foreach (Transaction transaction in item.Transactions)
            {
                TxOut smartContractInternalCallTxOut = transaction.Outputs.FirstOrDefault(txOut => txOut.ScriptPubKey.IsSmartContractInternalCall());
                if (smartContractInternalCallTxOut != null)
                {
                    // handle sc internal transfer
                }

                TxOut smartContractTxOut = transaction.Outputs.FirstOrDefault(txOut => txOut.ScriptPubKey.IsSmartContractExec());

                if (smartContractTxOut != null)
                {
                    // is this a smart contract transaction
                    if (smartContractTxOut.ScriptPubKey.IsSmartContractExec())
                    {
                        string contractOpcode = smartContractTxOut.ScriptPubKey.IsSmartContractCreate() ? "create" :
                                                smartContractTxOut.ScriptPubKey.IsSmartContractCall() ? "call" : null;

                        // fetch the contract receipt
                        ContractReceiptResponse receipt = cirrusClient.GetContractInfoAsync(transaction.GetHash().ToString()).Result;

                        if (receipt == null)
                        {
                            throw new ApplicationException($"Smart Contract receipt not found for trx {transaction.GetHash()}");
                        }

                        cirrusStorageBatch.CirrusContractTable.Add(new CirrusContractTable
                        {
                            ContractOpcode     = contractOpcode,
                            ContractCodeType   = receipt.ContractCodeType,
                            MethodName         = receipt.MethodName,
                            NewContractAddress = receipt.NewContractAddress,
                            FromAddress        = receipt.From,
                            ToAddress          = receipt.To,
                            BlockIndex         = item.BlockInfo.Height,
                            TransactionId      = receipt.TransactionHash,
                            Success            = receipt.Success,
                            Error           = receipt.Error,
                            PostState       = receipt.PostState,
                            GasUsed         = receipt.GasUsed,
                            GasPrice        = receipt.GasPrice,
                            Amount          = receipt.Amount,
                            ContractBalance = receipt.ContractBalance,
                            Logs            = receipt.Logs
                        });

                        if (receipt.ContractCodeHash != null)
                        {
                            cirrusStorageBatch.CirrusContractCodeTable.Add(new CirrusContractCodeTable
                            {
                                ContractAddress = receipt.NewContractAddress,
                                BlockIndex      = item.BlockInfo.Height,
                                CodeType        = receipt.ContractCodeType,
                                ContractHash    = receipt.ContractCodeHash,
                                ByteCode        = receipt.ContractBytecode,
                                SourceCode      = receipt.ContractCSharp
                            });
                        }
                    }
                }
            }
        }
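
The two Cirrus overrides (OnAddToStorageBatch here and OnPushStorageBatch in the earlier example) communicate through the base batch's ExtraData slot: the add hook lazily creates the Cirrus-specific batch and fills it, and the push hook casts it back and writes its tables. A minimal sketch of that extension-point shape, using simplified stand-in types rather than the indexer's own:

using System;
using System.Collections.Generic;

class StorageBatchSketch
{
    public object ExtraData { get; set; }
}

class CirrusBatchSketch
{
    public List<string> ContractRows { get; } = new List<string>();
}

class CirrusIndexerSketch
{
    // Mirrors OnAddToStorageBatch: lazily create and populate the typed batch.
    public void OnAdd(StorageBatchSketch batch, string contractRow)
    {
        batch.ExtraData ??= new CirrusBatchSketch();
        if (batch.ExtraData is CirrusBatchSketch cirrus)
        {
            cirrus.ContractRows.Add(contractRow);
        }
    }

    // Mirrors OnPushStorageBatch: cast the slot back and persist its contents.
    public void OnPush(StorageBatchSketch batch)
    {
        if (!(batch.ExtraData is CirrusBatchSketch cirrus))
        {
            throw new InvalidOperationException("ExtraData was not populated by OnAdd.");
        }

        Console.WriteLine($"pushing {cirrus.ContractRows.Count} contract rows");
    }
}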