Example #1
        async void Initialize()
        {
            // TODO: Figure out how to verify the transition from
            // "old table only" to "in migration" to "new table only".
            // (I don't think this is the biggest risk, but I'm still
            // interested in verifying it.)
            // I assume once we do that, the possibility of insertions while
            // we're in "old table only" will model the ability to start
            // from a nonempty table.

            configService = new InMemoryConfigurationService <MTableConfiguration>(
                MasterMigratingTable.INITIAL_CONFIGURATION);
            oldTable       = new InMemoryTable();
            newTable       = new InMemoryTable();
            referenceTable = new InMemoryTableWithHistory();
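            // The reference table mirrors every batch applied to the backing
            // tables (see the mirror calls below), presumably so the harness
            // can compare the migrating tables against a single linearizable
            // history.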

#if false
            // Second partition from the example in:
            // https://microsoft.sharepoint.com/teams/toolsforeng/Shared%20Documents/ContentRepository/LiveMigration/Migration_slides.pptx
            MTableEntity eMeta = new MTableEntity {
                PartitionKey   = MigrationModel.SINGLE_PARTITION_KEY,
                RowKey         = MigratingTable.ROW_KEY_PARTITION_META,
                partitionState = MTablePartitionState.SWITCHED,
            };
            MTableEntity e0       = TestUtils.CreateTestMTableEntity("0", "orange");
            MTableEntity e1old    = TestUtils.CreateTestMTableEntity("1", "red");
            MTableEntity e2new    = TestUtils.CreateTestMTableEntity("2", "green");
            MTableEntity e3old    = TestUtils.CreateTestMTableEntity("3", "blue");
            MTableEntity e3new    = TestUtils.CreateTestMTableEntity("3", "azure");
            MTableEntity e4old    = TestUtils.CreateTestMTableEntity("4", "yellow");
            MTableEntity e4new    = TestUtils.CreateTestMTableEntity("4", null, true);
            var          oldBatch = new TableBatchOperation();
            oldBatch.InsertOrReplace(eMeta);
            oldBatch.InsertOrReplace(e0);
            oldBatch.InsertOrReplace(e1old);
            oldBatch.InsertOrReplace(e3old);
            oldBatch.InsertOrReplace(e4old);
            IList <TableResult> oldTableResult = await oldTable.ExecuteBatchAsync(oldBatch);
            await ExecuteExportedMirrorBatchAsync(oldBatch, oldTableResult);

            var newBatch = new TableBatchOperation();
            newBatch.InsertOrReplace(e0);
            newBatch.InsertOrReplace(e2new);
            newBatch.InsertOrReplace(e3new);
            newBatch.InsertOrReplace(e4new);
            IList <TableResult> newTableResult = await newTable.ExecuteBatchAsync(newBatch);

            // Allow rows to overwrite rather than composing the virtual ETags manually.
            // InsertOrReplace doesn't use the ETag, so we don't care that the ETag was mutated by the original batch.
            await ExecuteExportedMirrorBatchAsync(newBatch, newTableResult);
#endif

            // For now, seed only the old table with data.
            var batch = new TableBatchOperation();
            batch.InsertOrReplace(TestUtils.CreateTestEntity("0", "orange"));
            batch.InsertOrReplace(TestUtils.CreateTestEntity("1", "red"));
            batch.InsertOrReplace(TestUtils.CreateTestEntity("3", "blue"));
            batch.InsertOrReplace(TestUtils.CreateTestEntity("4", "yellow"));
            IList <TableResult> oldTableResult = await oldTable.ExecuteBatchAsync(batch);

            // InsertOrReplace doesn't use the ETag, so we don't care that the ETag was mutated by the original batch.
            await referenceTable.ExecuteMirrorBatchAsync(batch, oldTableResult);

            //CreateMonitor(typeof(RunningServiceMachinesMonitor));

            for (int i = 0; i < MigrationModel.NUM_SERVICE_MACHINES; i++)
            {
                InitializeAppMachine(CreateMachine(typeof(ServiceMachine)));
            }

            InitializeAppMachine(CreateMachine(typeof(MigratorMachine)));

            Send(Id, new TablesMachineInitializedEvent());
        }
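
        // Executes the caller's batch against the new table: the partition is
        // first switched, and while the configuration is still at PREFER_NEW
        // or earlier the affected rows are migrated before the batch is
        // attempted.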
        async Task <IList <TableResult> > ExecuteBatchOnNewTableAsync(MTableConfiguration config,
                                                                      TableBatchOperation batch, TableRequestOptions requestOptions, OperationContext operationContext)
        {
            string partitionKey = ChainTableUtils.GetBatchPartitionKey(batch);

            await EnsurePartitionSwitchedAsync(partitionKey, requestOptions, operationContext);

            if (config.state <= TableClientState.PREFER_NEW)
            {
                await EnsureAffectedRowsMigratedAsync(batch, requestOptions, operationContext);
            }

            Attempt:
            // Retry loop target: execute the batch against the new table; on a
            // concurrency failure we jump back here, re-read the affected
            // rows, and rebuild the batch.
            var query = GenerateQueryForAffectedRows(batch);
            IList <MTableEntity> newRows = await newTable.ExecuteQueryAtomicAsync(query, requestOptions, operationContext);

            Dictionary <string, MTableEntity> newDict = newRows.ToDictionary(ent => ent.RowKey);
            // NOTE!  At this point, the read has not yet been annotated.  It is annotated below.

            var newBatch = new TableBatchOperation();
            var inputToNewTableIndexMapping = new List <int?>();
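            // inputToNewTableIndexMapping[i] records where operation i of the
            // input batch landed in newBatch, or null if no new-table
            // operation was generated for it (a delete-if-exists of a missing
            // row when tombstones are not being left).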

            for (int i = 0; i < batch.Count; i++)
            {
                TableOperation op             = batch[i];
                ITableEntity   passedEntity   = op.GetEntity();
                MTableEntity   existingEntity = newDict.GetValueOrDefault(passedEntity.RowKey);
                TableOperation newOp          = null;
                HttpStatusCode?errorCode      = null;

                TranslateOperationForNewTable(
                    op, existingEntity, config.state <= TableClientState.USE_NEW_WITH_TOMBSTONES,
                    ref newOp, ref errorCode);

                if (errorCode != null)
                {
                    Debug.Assert(newOp == null);
                    await monitor.AnnotateLastBackendCallAsync(wasLinearizationPoint : true);

                    throw ChainTableUtils.GenerateBatchException(errorCode.Value, i);
                }
                if (newOp != null)
                {
                    inputToNewTableIndexMapping.Add(newBatch.Count);
                    newBatch.Add(newOp);
                }
                else
                {
                    inputToNewTableIndexMapping.Add(null);
                }
            }
            await monitor.AnnotateLastBackendCallAsync();

            IList <TableResult> newResults;

            try
            {
                newResults = await newTable.ExecuteBatchAsync(newBatch, requestOptions, operationContext);
            }
            catch (ChainTableBatchException)
            {
                // XXX: Try to distinguish expected concurrency exceptions from unexpected exceptions?
                await monitor.AnnotateLastBackendCallAsync();

                goto Attempt;
            }

            // We made it!
            var results = new List <TableResult>();

            for (int i = 0; i < batch.Count; i++)
            {
                ITableEntity passedEntity  = batch[i].GetEntity();
                int?         newTableIndex = inputToNewTableIndexMapping[i];
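                // A Delete normally yields no output ETag; with the
                // TombstoneOutputETag bug enabled, the backend ETag is
                // returned for every operation that actually reached the new
                // table, including the tombstone written for a Delete.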
                string       newETag       =
                    (IsBugEnabled(MTableOptionalBug.TombstoneOutputETag)
                    ? newTableIndex == null
                    : batch[i].GetOperationType() == TableOperationType.Delete)
                    ? null : newResults[newTableIndex.Value].Etag;
                if (newETag != null)
                {
                    passedEntity.ETag = newETag;
                }
                results.Add(new TableResult
                {
                    HttpStatusCode = (int)(
                        (batch[i].GetOperationType() == TableOperationType.Insert) ? HttpStatusCode.Created : HttpStatusCode.NoContent),
                    Etag   = newETag,
                    Result = passedEntity,
                });
            }
            await monitor.AnnotateLastBackendCallAsync(wasLinearizationPoint : true, successfulBatchResult : results);

            return(results);
        }
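
            // Applied by the streaming-query helper (note the references to
            // "outer", the enclosing MigratingTable) when the configuration
            // changes mid-stream: it starts the new-table stream once clients
            // may read from the new table, and once the configuration reaches
            // USE_NEW_HIDE_METADATA it disposes the old-table stream, backing
            // up the new stream if it may have run ahead of the old one.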
            async Task ApplyConfigurationAsync(MTableConfiguration newConfig)
            {
                using (await LockAsyncBuggable())
                {
                    PrimaryKey continuationKey = InternalGetContinuationPrimaryKey();
                    // ExecuteQueryStreamedAsync validated that mtableQuery has no select or top.
                    TableQuery <MTableEntity> newTableContinuationQuery =
                        (continuationKey == null) ? null : new TableQuery <MTableEntity>
                    {
                        // As in ExecuteQueryAtomicAsync, we have to retrieve all
                        // potential shadowing rows.
                        // XXX: This pays an even bigger penalty for not keeping
                        // conditions on the primary key.  But if we were to simply
                        // keep all such conditions, there's a potential to add
                        // more and more continuation filter conditions due to
                        // layering of IChainTable2s, which would lead to some extra
                        // overhead and push us closer to the limit on number of
                        // comparisons.  Either try to parse for this or change
                        // ExecuteQueryStreamedAsync to always take a continuation
                        // primary key?
                        FilterString =
                            outer.IsBugEnabled(MTableOptionalBug.QueryStreamedFilterShadowing)
                                ? ChainTableUtils.CombineFilters(
                                ChainTableUtils.GenerateContinuationFilterCondition(continuationKey),
                                TableOperators.And,
                                mtableQuery.FilterString      // Could be empty.
                                )
                                : ChainTableUtils.GenerateContinuationFilterCondition(continuationKey),
                    };
                    bool justStartedNewStream = false;

                    // Actually, if the query started in state
                    // USE_OLD_HIDE_METADATA, it is API-compliant to continue
                    // returning data from the old table until the migrator starts
                    // deleting it, at which point we switch to the new table.  But
                    // some callers may benefit from fresher data, even if it is
                    // never guaranteed, so we go ahead and start the new stream.
                    // XXX: Is this the right decision?
                    if (newConfig.state > TableClientState.USE_OLD_HIDE_METADATA &&
                        currentConfig.state <= TableClientState.USE_OLD_HIDE_METADATA &&
                        newTableContinuationQuery != null)
                    {
                        newTableStream = await outer.newTable.ExecuteQueryStreamedAsync(newTableContinuationQuery, requestOptions, operationContext);

                        newTableNext = await newTableStream.ReadRowAsync();

                        justStartedNewStream = true;
                    }

                    if (newConfig.state >= TableClientState.USE_NEW_HIDE_METADATA &&
                        currentConfig.state < TableClientState.USE_NEW_HIDE_METADATA)
                    {
                        oldTableStream.Dispose();
                        oldTableStream = null;
                        oldTableNext   = null; // Stop DetermineNextSide from trying to read the old stream.
                        if (!outer.IsBugEnabled(MTableOptionalBug.QueryStreamedBackUpNewStream) &&
                            newTableContinuationQuery != null && !justStartedNewStream)
                        {
                            // The new stream could have gotten ahead of the old
                            // stream if rows had not yet been migrated.  This
                            // was OK as long as we still planned to read those
                            // rows from the old stream, but now we have to back
                            // up the new stream to where the old stream was.
                            newTableStream.Dispose();
                            newTableStream = await outer.newTable.ExecuteQueryStreamedAsync(newTableContinuationQuery, requestOptions, operationContext);

                            newTableNext = await newTableStream.ReadRowAsync();
                        }
                    }
                    if (!outer.IsBugEnabled(MTableOptionalBug.QueryStreamedSaveNewConfig))
                    {
                        currentConfig = newConfig;
                    }
                }
            }
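
        // Translates one operation of the caller's batch into the operation
        // (if any) to issue against the new table, given the MTable row
        // currently stored there (existingEntity, or null if absent).  At most
        // one of newOp/errorCode is set on return; both stay null only for a
        // delete-if-exists of a missing row when tombstones are not being
        // left (see the FIXME below).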
        void TranslateOperationForNewTable(
            TableOperation op, MTableEntity existingEntity, bool leaveTombstones,
            ref TableOperation newOp, ref HttpStatusCode?errorCode)
        {
            ITableEntity       passedEntity = op.GetEntity();
            TableOperationType opType       = op.GetOperationType();

            switch (opType)
            {
            case TableOperationType.Insert:
                if (existingEntity == null)
                {
                    newOp = TableOperation.Insert(ChainTableUtils.CopyEntity <MTableEntity>(passedEntity));
                }
                else if (existingEntity.deleted)
                {
                    newOp = TableOperation.Replace(ImportWithIfMatch(passedEntity, existingEntity.ETag));
                }
                else
                {
                    errorCode = HttpStatusCode.Conflict;
                }
                break;

            case TableOperationType.Replace:
                if ((errorCode = CheckExistingEntity(passedEntity, existingEntity)) == null)
                {
                    newOp = TableOperation.Replace(ImportWithIfMatch(passedEntity, existingEntity.ETag));
                }
                break;

            case TableOperationType.Merge:
                if ((errorCode = CheckExistingEntity(passedEntity, existingEntity)) == null)
                {
                    newOp = TableOperation.Merge(ImportWithIfMatch(passedEntity, existingEntity.ETag));
                }
                break;

            case TableOperationType.Delete:
                string buggablePartitionKey, buggableRowKey;
                if (IsBugEnabled(MTableOptionalBug.DeletePrimaryKey))
                {
                    buggablePartitionKey = buggableRowKey = null;
                }
                else
                {
                    buggablePartitionKey = passedEntity.PartitionKey;
                    buggableRowKey       = passedEntity.RowKey;
                }
                if (leaveTombstones)
                {
                    if (passedEntity.ETag == ChainTable2Constants.ETAG_DELETE_IF_EXISTS)
                    {
                        newOp = TableOperation.InsertOrReplace(new MTableEntity {
                            PartitionKey = buggablePartitionKey, RowKey = buggableRowKey, deleted = true
                        });
                    }
                    else if ((errorCode = CheckExistingEntity(passedEntity, existingEntity)) == null)
                    {
                        newOp = TableOperation.Replace(new MTableEntity {
                            PartitionKey = buggablePartitionKey, RowKey = buggableRowKey,
                            deleted      = true, ETag = existingEntity.ETag
                        });
                    }
                }
                else
                {
                    if (passedEntity.ETag == ChainTable2Constants.ETAG_DELETE_IF_EXISTS)
                    {
                        if (existingEntity != null)
                        {
                            newOp = TableOperation.Delete(new MTableEntity {
                                PartitionKey = buggablePartitionKey, RowKey = buggableRowKey,
                                // It's OK to delete the entity and return success whether or not
                                // the entity is a tombstone by the time it is actually deleted.
                                ETag = IsBugEnabled(MTableOptionalBug.DeleteNoLeaveTombstonesETag) ? null : ChainTable2Constants.ETAG_ANY
                            });
                        }
                        // Otherwise generate nothing.
                        // FIXME: This is not linearizable!  It can also generate empty batches.
                    }
                    else if ((errorCode = CheckExistingEntity(passedEntity, existingEntity)) == null)
                    {
                        // Another client in USE_NEW_WITH_TOMBSTONES could concurrently replace the
                        // entity with a tombstone, in which case we need to return 404 to the caller,
                        // hence this needs to be conditioned on the existing ETag.
                        newOp = TableOperation.Delete(new MTableEntity {
                            PartitionKey = buggablePartitionKey, RowKey = buggableRowKey,
                            ETag         = IsBugEnabled(MTableOptionalBug.DeleteNoLeaveTombstonesETag) ? null : existingEntity.ETag
                        });
                    }
                }
                break;

            case TableOperationType.InsertOrReplace:
                newOp = TableOperation.InsertOrReplace(ChainTableUtils.CopyEntity <MTableEntity>(passedEntity));
                break;

            case TableOperationType.InsertOrMerge:
                newOp = TableOperation.InsertOrMerge(ChainTableUtils.CopyEntity <MTableEntity>(passedEntity));
                break;

            default:
                throw new NotImplementedException();
            }
        }
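
        // The cases above rely on two helpers not shown in this excerpt:
        // ImportWithIfMatch, which copies the caller's entity into an
        // MTableEntity carrying the backend ETag to match, and
        // CheckExistingEntity, which rejects operations on missing or
        // tombstoned rows and on ETag mismatches.  A rough sketch of what
        // CheckExistingEntity presumably looks like, reconstructed from how it
        // is used above rather than taken from the real implementation:
        //
        //     HttpStatusCode? CheckExistingEntity(
        //         ITableEntity passedEntity, MTableEntity existingEntity)
        //     {
        //         if (existingEntity == null || existingEntity.deleted)
        //             return HttpStatusCode.NotFound;            // row absent or tombstoned
        //         if (passedEntity.ETag != ChainTable2Constants.ETAG_ANY
        //             && passedEntity.ETag != existingEntity.ETag)
        //             return HttpStatusCode.PreconditionFailed;  // If-Match failed
        //         return null;                                   // OK to proceed
        //     }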