// In theory, if the migrator dies, you should be able to call this
// again (using the same configuration service!) to resume.
// XXX: Remember how far we got through a copy or cleanup pass.
/// <summary>
/// Drives the migration forward from whatever state the configuration
/// service currently reports, pushing each successive configuration
/// (USE_OLD_HIDE_METADATA -> PREFER_OLD -> PREFER_NEW -> copy pass ->
/// USE_NEW_WITH_TOMBSTONES -> USE_NEW_HIDE_METADATA -> cleanup pass).
/// Each IsBugEnabled check deliberately skips one configuration push so
/// tests can emulate a buggy migrator.
/// </summary>
/// <param name="requestOptions">Passed through to the table calls; may be null.</param>
/// <param name="operationContext">Passed through to the table calls; may be null.</param>
public async Task MigrateAsync(TableRequestOptions requestOptions = null,
    OperationContext operationContext = null)
{
    MTableConfiguration config;
    // One-shot snapshot of the current configuration: subscribe, capture
    // the out-param, and immediately dispose the subscription.
    base.configService.Subscribe(
        FixedSubscriber<MTableConfiguration>.Instance, out config).Dispose();
    // XXX: Ideally we'd lock out other masters somehow.
    TableClientState state = config.state;
    bool finished = false;
    while (!finished)
    {
        switch (state)
        {
            case TableClientState.USE_OLD_HIDE_METADATA:
                // Bug emulation: advance without pushing PREFER_OLD first.
                if (!IsBugEnabled(MTableOptionalBug.MigrateSkipPreferOld))
                {
                    config = new MTableConfiguration(TableClientState.PREFER_OLD);
                    await configService.PushConfigurationAsync(config);
                }
                state = TableClientState.PREFER_OLD;
                break;
            case TableClientState.PREFER_OLD:
                // Bug emulation: advance without pushing PREFER_NEW first.
                if (!IsBugEnabled(MTableOptionalBug.InsertBehindMigrator))
                {
                    config = new MTableConfiguration(TableClientState.PREFER_NEW);
                    await configService.PushConfigurationAsync(config);
                }
                state = TableClientState.PREFER_NEW;
                break;
            case TableClientState.PREFER_NEW:
                // Copy pass over the data before advancing the state.
                await CopyAsync(requestOptions, operationContext);
                // Bug emulation: advance without pushing USE_NEW_WITH_TOMBSTONES.
                if (!IsBugEnabled(MTableOptionalBug.MigrateSkipUseNewWithTombstones))
                {
                    config = new MTableConfiguration(TableClientState.USE_NEW_WITH_TOMBSTONES);
                    await configService.PushConfigurationAsync(config);
                }
                state = TableClientState.USE_NEW_WITH_TOMBSTONES;
                break;
            case TableClientState.USE_NEW_WITH_TOMBSTONES:
                config = new MTableConfiguration(TableClientState.USE_NEW_HIDE_METADATA);
                await configService.PushConfigurationAsync(config);
                state = TableClientState.USE_NEW_HIDE_METADATA;
                break;
            case TableClientState.USE_NEW_HIDE_METADATA:
                // Final state: run the cleanup pass and stop.
                await CleanupAsync(requestOptions, operationContext);
                finished = true;
                break;
            default:
                // Unknown state: nothing to do (matches the original
                // switch falling through and returning).
                finished = true;
                break;
        }
    }
}
/// <summary>
/// Reacts (under the stream's lock) to a configuration change that arrived
/// while a streamed query is in progress.  Rebuilds a continuation query
/// from the current continuation primary key, starts reading the new table
/// once the state moves past USE_OLD_HIDE_METADATA, and drops the old-table
/// stream (backing up the new stream if needed) once the state reaches
/// USE_NEW_HIDE_METADATA.
/// </summary>
/// <param name="newConfig">The configuration being applied.</param>
async Task ApplyConfigurationAsync(MTableConfiguration newConfig)
{
    using (await LockAsyncBuggable())
    {
        PrimaryKey continuationKey = InternalGetContinuationPrimaryKey();
        // ExecuteQueryStreamedAsync validated that mtableQuery has no select or top.
        // Build the query used to (re)open the new-table stream from the
        // continuation point; null when there is no continuation key.
        TableQuery<MTableEntity> newTableContinuationQuery =
            (continuationKey == null) ? null
            : new TableQuery<MTableEntity>
            {
                // As in ExecuteQueryAtomicAsync, we have to retrieve all
                // potential shadowing rows.
                // XXX: This pays even a bigger penalty for not keeping
                // conditions on the primary key.  But if we were to simply
                // keep all such conditions, there's a potential to add
                // more and more continuation filter conditions due to
                // layering of IChainTable2s, which would lead to some extra
                // overhead and push us closer to the limit on number of
                // comparisons.  Either try to parse for this or change
                // ExecuteQueryStreamedAsync to always take a continuation
                // primary key?
                FilterString = outer.IsBugEnabled(MTableOptionalBug.QueryStreamedFilterShadowing)
                    ? ChainTableUtils.CombineFilters(
                        ChainTableUtils.GenerateContinuationFilterCondition(continuationKey),
                        TableOperators.And,
                        mtableQuery.FilterString  // Could be empty.
                        )
                    : ChainTableUtils.GenerateContinuationFilterCondition(continuationKey),
            };
        bool justStartedNewStream = false;
        // Actually, if the query started in state
        // USE_OLD_HIDE_METADATA, it is API-compliant to continue
        // returning data from the old table until the migrator starts
        // deleting it, at which point we switch to the new table.  But
        // some callers may benefit from fresher data, even if it is
        // never guaranteed, so we go ahead and start the new stream.
        // XXX: Is this the right decision?
        if (newConfig.state > TableClientState.USE_OLD_HIDE_METADATA
            && currentConfig.state <= TableClientState.USE_OLD_HIDE_METADATA
            && newTableContinuationQuery != null)
        {
            // First transition past USE_OLD_HIDE_METADATA: open the new-table
            // stream from the continuation point and pre-read its first row.
            newTableStream = await outer.newTable.ExecuteQueryStreamedAsync(
                newTableContinuationQuery, requestOptions, operationContext);
            newTableNext = await newTableStream.ReadRowAsync();
            justStartedNewStream = true;
        }
        if (newConfig.state >= TableClientState.USE_NEW_HIDE_METADATA
            && currentConfig.state < TableClientState.USE_NEW_HIDE_METADATA)
        {
            // First transition to USE_NEW_HIDE_METADATA: the old table is no
            // longer authoritative, so drop its stream entirely.
            oldTableStream.Dispose();
            oldTableStream = null;
            oldTableNext = null;  // Stop DetermineNextSide from trying to read the old stream.
            if (!outer.IsBugEnabled(MTableOptionalBug.QueryStreamedBackUpNewStream)
                && newTableContinuationQuery != null && !justStartedNewStream)
            {
                // The new stream could have gotten ahead of the old
                // stream if rows had not yet been migrated.  This
                // was OK as long as we still planned to read those
                // rows from the old stream, but now we have to back
                // up the new stream to where the old stream was.
                newTableStream.Dispose();
                newTableStream = await outer.newTable.ExecuteQueryStreamedAsync(
                    newTableContinuationQuery, requestOptions, operationContext);
                newTableNext = await newTableStream.ReadRowAsync();
            }
        }
        if (!outer.IsBugEnabled(MTableOptionalBug.QueryStreamedSaveNewConfig))
        {
            // Record the config we just applied (the bug emulation variant
            // deliberately keeps the stale currentConfig).
            currentConfig = newConfig;
        }
    }
}
/// <summary>
/// Executes the caller's batch against the new table, retrying the
/// read/translate/write cycle on concurrency conflicts until the batch
/// commits or a per-operation error is detected.  Results are rewritten so
/// the caller sees ETags and status codes consistent with operating on the
/// virtual (migrating) table rather than on the new backing table.
/// </summary>
/// <param name="config">Configuration governing translation (tombstones vs. real deletes).</param>
/// <param name="batch">The caller's batch; all operations share one partition key.</param>
/// <param name="requestOptions">Passed through to the table calls; may be null.</param>
/// <param name="operationContext">Passed through to the table calls; may be null.</param>
/// <returns>Per-operation results in the same order as <paramref name="batch"/>.</returns>
async Task<IList<TableResult>> ExecuteBatchOnNewTableAsync(MTableConfiguration config,
    TableBatchOperation batch, TableRequestOptions requestOptions, OperationContext operationContext)
{
    string partitionKey = ChainTableUtils.GetBatchPartitionKey(batch);
    await EnsurePartitionSwitchedAsync(partitionKey, requestOptions, operationContext);
    if (config.state <= TableClientState.PREFER_NEW)
    {
        // Rows may still live only in the old table; migrate the affected
        // ones so the atomic read below sees them.
        await EnsureAffectedRowsMigratedAsync(batch, requestOptions, operationContext);
    }
    Attempt:
    // Batch on new table: read the affected rows, translate each caller
    // operation against what we read, and attempt the write.
    var query = GenerateQueryForAffectedRows(batch);
    IList<MTableEntity> newRows = await newTable.ExecuteQueryAtomicAsync(query, requestOptions, operationContext);
    Dictionary<string, MTableEntity> newDict = newRows.ToDictionary(ent => ent.RowKey);
    // NOTE! At this point, the read has not yet been annotated.  It is annotated below.
    var newBatch = new TableBatchOperation();
    // For input operation i: index of its translated operation in newBatch,
    // or null when translation produced no new-table operation.
    var inputToNewTableIndexMapping = new List<int?>();
    for (int i = 0; i < batch.Count; i++)
    {
        TableOperation op = batch[i];
        ITableEntity passedEntity = op.GetEntity();
        MTableEntity existingEntity = newDict.GetValueOrDefault(passedEntity.RowKey);
        TableOperation newOp = null;
        HttpStatusCode? errorCode = null;
        TranslateOperationForNewTable(
            op, existingEntity, config.state <= TableClientState.USE_NEW_WITH_TOMBSTONES,
            ref newOp, ref errorCode);
        if (errorCode != null)
        {
            Debug.Assert(newOp == null);
            // The read that exposed the error is the linearization point of
            // the caller's (failed) batch.
            await monitor.AnnotateLastBackendCallAsync(wasLinearizationPoint: true);
            throw ChainTableUtils.GenerateBatchException(errorCode.Value, i);
        }
        if (newOp != null)
        {
            inputToNewTableIndexMapping.Add(newBatch.Count);
            newBatch.Add(newOp);
        }
        else
        {
            inputToNewTableIndexMapping.Add(null);
        }
    }
    await monitor.AnnotateLastBackendCallAsync();
    IList<TableResult> newResults;
    try
    {
        newResults = await newTable.ExecuteBatchAsync(newBatch, requestOptions, operationContext);
    }
    catch (ChainTableBatchException)
    {
        // XXX: Try to distinguish expected concurrency exceptions from unexpected exceptions?
        await monitor.AnnotateLastBackendCallAsync();
        goto Attempt;
    }
    // We made it!  Rewrite the new-table results into caller-visible results.
    var results = new List<TableResult>();
    for (int i = 0; i < batch.Count; i++)
    {
        ITableEntity passedEntity = batch[i].GetEntity();
        int? newTableIndex = inputToNewTableIndexMapping[i];
        // Normally: Deletes get a null ETag (callers must not see the
        // tombstone's ETag); everything else gets the new-table result's ETag.
        // Bug emulation (TombstoneOutputETag): leak the raw new-table ETag for
        // every operation that produced one, including tombstone writes.
        // FIX: this arm must be `newTableIndex == null` (no new-table result,
        // so no ETag available).  The previous `newTableIndex != null` both
        // defeated the bug emulation and routed the null index into
        // newTableIndex.Value below, which throws InvalidOperationException.
        string newETag =
            (IsBugEnabled(MTableOptionalBug.TombstoneOutputETag)
                ? newTableIndex == null
                : batch[i].GetOperationType() == TableOperationType.Delete)
            ? null
            : newResults[newTableIndex.Value].Etag;
        if (newETag != null)
        {
            passedEntity.ETag = newETag;
        }
        results.Add(new TableResult
        {
            HttpStatusCode = (int)(
                (batch[i].GetOperationType() == TableOperationType.Insert)
                ? HttpStatusCode.Created : HttpStatusCode.NoContent),
            Etag = newETag,
            Result = passedEntity,
        });
    }
    await monitor.AnnotateLastBackendCallAsync(wasLinearizationPoint: true, successfulBatchResult: results);
    return results;
}