/// <summary>
/// Called when this host acquires a lease for a partition. Spins up a dedicated worker task
/// that opens the observer, reads the partition's change feed in a loop, dispatches batches to
/// <see cref="IChangeFeedObserver.ProcessChangesAsync"/>, and checkpoints the lease as it goes.
/// The worker runs until shutdown, lease loss, or an unrecoverable error, at which point it
/// releases the partition.
/// </summary>
/// <param name="lease">The acquired lease; must be non-null with a non-empty Owner.</param>
async Task IPartitionObserver<DocumentServiceLease>.OnPartitionAcquiredAsync(DocumentServiceLease lease)
{
    Debug.Assert(lease != null && !string.IsNullOrEmpty(lease.Owner), "lease");
    TraceLog.Informational(string.Format("Host '{0}' partition {1}: acquired!", this.HostName, lease.PartitionId));
#if DEBUG
    Interlocked.Increment(ref this.partitionCount);
#endif

    IChangeFeedObserver observer = this.observerFactory.CreateObserver();
    ChangeFeedObserverContext context = new ChangeFeedObserverContext { PartitionKeyRangeId = lease.PartitionId };
    // Used to cancel feed-poll delays when the worker is asked to shut down.
    CancellationTokenSource cancellation = new CancellationTokenSource();

    // Create ChangeFeedOptions to use for this worker.
    // Copied per worker because PartitionKeyRangeId/RequestContinuation are mutated below.
    ChangeFeedOptions options = new ChangeFeedOptions
    {
        MaxItemCount = this.changeFeedOptions.MaxItemCount,
        PartitionKeyRangeId = this.changeFeedOptions.PartitionKeyRangeId,
        SessionToken = this.changeFeedOptions.SessionToken,
        StartFromBeginning = this.changeFeedOptions.StartFromBeginning,
        RequestContinuation = this.changeFeedOptions.RequestContinuation
    };

    // NOTE: StartNew with an async lambda returns Task<Task>; the 'await' here unwraps the outer
    // task, so workerTask below is the *inner* (still running) worker task, not a completed one.
    var workerTask = await Task.Factory.StartNew(async () =>
    {
        // Reason the worker stopped; null means "still running / cancelled without error".
        ChangeFeedObserverCloseReason? closeReason = null;
        try
        {
            try
            {
                await observer.OpenAsync(context);
            }
            catch (Exception ex)
            {
                TraceLog.Error(string.Format("IChangeFeedObserver.OpenAsync exception: {0}", ex));
                closeReason = ChangeFeedObserverCloseReason.ObserverError;
                throw;
            }

            options.PartitionKeyRangeId = lease.PartitionId;
            // Resume from the lease's checkpointed continuation when one exists.
            if (!string.IsNullOrEmpty(lease.ContinuationToken))
            {
                options.RequestContinuation = lease.ContinuationToken;
            }

            CheckpointStats checkpointStats = null;
            if (!this.statsSinceLastCheckpoint.TryGetValue(lease.PartitionId, out checkpointStats) || checkpointStats == null)
            {
                throw new Exception(string.Format(CultureInfo.InvariantCulture, "Failed to get checkpoint stats for partition {0}", lease.PartitionId));
            }

            IDocumentQuery<Document> query = this.documentClient.CreateDocumentChangeFeedQuery(this.collectionSelfLink, options);
            TraceLog.Verbose(string.Format("Worker start: partition '{0}', continuation '{1}'", lease.PartitionId, lease.ContinuationToken));

            try
            {
                while (this.isShutdown == 0)
                {
                    // Inner loop: drain the feed until no more results, then poll after a delay.
                    do
                    {
                        DocumentClientException dcex = null;
                        FeedResponse<Document> response = null;
                        try
                        {
                            response = await query.ExecuteNextAsync<Document>();
                        }
                        catch (DocumentClientException ex)
                        {
                            // Only NotFound/TooManyRequests/ServiceUnavailable are handled below;
                            // anything else propagates to the outer catch.
                            if (StatusCode.NotFound != (StatusCode)ex.StatusCode &&
                                StatusCode.TooManyRequests != (StatusCode)ex.StatusCode &&
                                StatusCode.ServiceUnavailable != (StatusCode)ex.StatusCode)
                            {
                                throw;
                            }

                            dcex = ex;
                        }

                        if (dcex != null)
                        {
                            const int ReadSessionNotAvailable = 1002;
                            if (StatusCode.NotFound == (StatusCode)dcex.StatusCode && GetSubStatusCode(dcex) != ReadSessionNotAvailable)
                            {
                                // Most likely, the database or collection was removed while we were enumerating.
                                // Shut down. The user will need to start over.
                                // Note: this has to be a new task, can't await for shutdown here, as shutdown awaits for all worker tasks.
                                await Task.Factory.StartNew(() => this.StopAsync(ChangeFeedObserverCloseReason.ResourceGone));
                                break;
                            }
                            else
                            {
                                Debug.Assert(StatusCode.TooManyRequests == (StatusCode)dcex.StatusCode || StatusCode.ServiceUnavailable == (StatusCode)dcex.StatusCode);
                                TraceLog.Warning(string.Format("Partition {0}: retriable exception : {1}", context.PartitionKeyRangeId, dcex.Message));
                                // Back off: honor the server's RetryAfter when provided, else use the configured poll delay.
                                await Task.Delay(dcex.RetryAfter != TimeSpan.Zero ? dcex.RetryAfter : this.options.FeedPollDelay, cancellation.Token);
                            }
                        }

                        if (response != null)
                        {
                            if (response.Count > 0)
                            {
                                List<Document> docs = new List<Document>();
                                docs.AddRange(response);
                                try
                                {
                                    context.FeedResponse = response;
                                    await observer.ProcessChangesAsync(context, docs);
                                }
                                catch (Exception ex)
                                {
                                    TraceLog.Error(string.Format("IChangeFeedObserver.ProcessChangesAsync exception: {0}", ex));
                                    closeReason = ChangeFeedObserverCloseReason.ObserverError;
                                    throw;
                                }
                                finally
                                {
                                    // Clear so the response is not retained past this batch.
                                    context.FeedResponse = null;
                                }
                            }

                            checkpointStats.ProcessedDocCount += (uint)response.Count;
                            if (IsCheckpointNeeded(lease, checkpointStats))
                            {
                                // CheckpointAsync returns the updated lease (new continuation/etag).
                                lease = await CheckpointAsync(lease, response.ResponseContinuation, context);
                                checkpointStats.Reset();
                            }
                            else if (response.Count > 0)
                            {
                                TraceLog.Informational(string.Format("Checkpoint: not checkpointing for partition {0}, {1} docs, new continuation {2} as frequency condition is not met", lease.PartitionId, response.Count, response.ResponseContinuation));
                            }
                        }
                    }
                    while (query.HasMoreResults && this.isShutdown == 0);

                    if (this.isShutdown == 0)
                    {
                        await Task.Delay(this.options.FeedPollDelay, cancellation.Token);
                    }
                } // Outer while (this.isShutdown == 0) loop.

                closeReason = ChangeFeedObserverCloseReason.Shutdown;
            }
            catch (TaskCanceledException)
            {
                // Expected path when cancellation.Token fires during one of the delays above.
                Debug.Assert(cancellation.IsCancellationRequested, "cancellation.IsCancellationRequested");
                TraceLog.Informational(string.Format("Cancel signal received for partition {0} worker!", context.PartitionKeyRangeId));
            }
        }
        catch (LeaseLostException)
        {
            closeReason = ChangeFeedObserverCloseReason.LeaseLost;
        }
        catch (Exception ex)
        {
            TraceLog.Error(string.Format("Partition {0} exception: {1}", context.PartitionKeyRangeId, ex));
            if (!closeReason.HasValue)
            {
                closeReason = ChangeFeedObserverCloseReason.Unknown;
            }
        }

        if (closeReason.HasValue)
        {
            TraceLog.Informational(string.Format("Releasing lease for partition {0} due to an error, reason: {1}!", context.PartitionKeyRangeId, closeReason.Value));
            // Note: this has to be a new task, because OnPartitionReleasedAsync awaits for worker task.
            await Task.Factory.StartNew(async () => await this.partitionManager.TryReleasePartitionAsync(context.PartitionKeyRangeId, true, closeReason.Value));
        }

        TraceLog.Informational(string.Format("Partition {0}: worker finished!", context.PartitionKeyRangeId));
    });

    // Register the worker so shutdown/release can find and cancel it by partition id.
    var newWorkerData = new WorkerData(workerTask, observer, context, cancellation);
    this.partitionKeyRangeIdToWorkerMap.AddOrUpdate(context.PartitionKeyRangeId, newWorkerData, (string id, WorkerData d) => { return (newWorkerData); });
}
/// <summary>
/// Create leases for new partitions and take care of split partitions.
/// For a child of a split, the continuation token is transferred from the gone parent's lease
/// so that processing resumes where the parent left off. Leases for gone (split) partitions
/// are deleted at the end.
/// </summary>
/// <param name="ranges">Current partition key ranges keyed by partition (range) id.</param>
private async Task CreateLeases(IDictionary<string, PartitionKeyRange> ranges)
{
    Debug.Assert(ranges != null);

    // Get leases after getting ranges, to make sure that no other hosts checked in continuation
    // for split partition after we got leases.
    var existingLeases = new Dictionary<string, DocumentServiceLease>();
    foreach (var lease in await this.leaseManager.ListLeases())
    {
        existingLeases.Add(lease.PartitionId, lease);
    }

    // Partitions that have a lease but no longer appear in the range list: gone due to split.
    var gonePartitionIds = new HashSet<string>();
    foreach (var partitionId in existingLeases.Keys)
    {
        if (!ranges.ContainsKey(partitionId))
        {
            gonePartitionIds.Add(partitionId);
        }
    }

    // Ranges with no lease yet: brand-new partitions or children of a split.
    var addedPartitionIds = new List<string>();
    foreach (var range in ranges)
    {
        if (!existingLeases.ContainsKey(range.Key))
        {
            addedPartitionIds.Add(range.Key);
        }
    }

    // Create leases for new partitions, if there was split, use continuation from parent partition.
    await addedPartitionIds.ForEachAsync(
        async addedRangeId =>
        {
            // Ensure checkpoint stats exist for the new partition (keep existing stats if present).
            this.statsSinceLastCheckpoint.AddOrUpdate(
                addedRangeId,
                new CheckpointStats(),
                (partitionId, existingStats) => existingStats);

            string continuationToken = null;
            string parentIds = string.Empty;
            var range = ranges[addedRangeId];
            if (range.Parents != null && range.Parents.Count > 0) // Check for split.
            {
                foreach (var parentRangeId in range.Parents)
                {
                    if (gonePartitionIds.Contains(parentRangeId))
                    {
                        // Transfer continuation from lease for gone parent to lease for its child partition.
                        Debug.Assert(existingLeases[parentRangeId] != null);
                        parentIds += parentIds.Length == 0 ? parentRangeId : "," + parentRangeId;
                        if (continuationToken != null)
                        {
                            // Bug fix: the format string has four placeholders ({0}-{3}) but the
                            // original call supplied only three arguments, which throws
                            // FormatException at runtime whenever a child has multiple gone parents.
                            TraceLog.Warning(string.Format(
                                "Partition {0}: found more than one parent, new continuation '{1}', current '{2}', will use '{3}'",
                                addedRangeId,
                                existingLeases[parentRangeId].ContinuationToken,
                                continuationToken,
                                existingLeases[parentRangeId].ContinuationToken));
                        }

                        // Last gone parent wins; see the warning above when more than one exists.
                        continuationToken = existingLeases[parentRangeId].ContinuationToken;
                    }
                }
            }

            bool wasCreated = await this.leaseManager.CreateLeaseIfNotExistAsync(addedRangeId, continuationToken);
            if (wasCreated)
            {
                if (parentIds.Length == 0)
                {
                    TraceLog.Informational(string.Format("Created lease for partition '{0}', continuation '{1}'.", addedRangeId, continuationToken));
                }
                else
                {
                    TraceLog.Informational(string.Format("Created lease for partition '{0}' as child of split partition(s) '{1}', continuation '{2}'.", addedRangeId, parentIds, continuationToken));
                }
            }
            else
            {
                // Another host raced us and created the lease first; that is fine.
                TraceLog.Warning(string.Format("Some other host created lease for '{0}' as child of split partition(s) '{1}', continuation '{2}'.", addedRangeId, parentIds, continuationToken));
            }
        },
        this.options.DegreeOfParallelism);

    // Remove leases for splitted (and thus gone partitions) and update continuation token.
    await gonePartitionIds.ForEachAsync(
        async goneRangeId =>
        {
            await this.leaseManager.DeleteAsync(existingLeases[goneRangeId]);
            TraceLog.Informational(string.Format("Deleted lease for gone (splitted) partition '{0}', continuation '{1}'", goneRangeId, existingLeases[goneRangeId].ContinuationToken));

            CheckpointStats removedStatsUnused;
            this.statsSinceLastCheckpoint.TryRemove(goneRangeId, out removedStatsUnused);
        },
        this.options.DegreeOfParallelism);
}
/// <summary>
/// Load-balancing pass: computes this host's fair share of partitions ("target") and tries to
/// reach it by acquiring expired/unowned leases first, or — if none are free — stealing a single
/// lease from the most loaded host.
/// </summary>
/// <returns>Map of partition id to lease for every lease successfully taken in this pass.</returns>
async Task<IDictionary<string, T>> TakeLeasesAsync()
{
    IDictionary<string, T> allPartitions = new Dictionary<string, T>();
    IDictionary<string, T> takenLeases = new Dictionary<string, T>();
    IDictionary<string, int> workerToPartitionCount = new Dictionary<string, int>();
    List<T> expiredLeases = new List<T>();

    // Classify every lease: free/expired (candidate to take) vs owned (count per owner).
    foreach (var lease in await this.leaseManager.ListLeases())
    {
        Debug.Assert(lease.PartitionId != null, "TakeLeasesAsync: lease.PartitionId cannot be null.");
        allPartitions.Add(lease.PartitionId, lease);
        if (string.IsNullOrWhiteSpace(lease.Owner) || await this.leaseManager.IsExpired(lease))
        {
            TraceLog.Verbose(string.Format("Found unused or expired lease: {0}", lease));
            expiredLeases.Add(lease);
        }
        else
        {
            int count = 0;
            string assignedTo = lease.Owner;
            if (workerToPartitionCount.TryGetValue(assignedTo, out count))
            {
                workerToPartitionCount[assignedTo] = count + 1;
            }
            else
            {
                workerToPartitionCount.Add(assignedTo, 1);
            }
        }
    }

    // Count ourselves even if we own nothing yet, so workerCount includes this host.
    if (!workerToPartitionCount.ContainsKey(this.workerName))
    {
        workerToPartitionCount.Add(this.workerName, 0);
    }

    int partitionCount = allPartitions.Count;
    int workerCount = workerToPartitionCount.Count;
    if (partitionCount > 0)
    {
        // Fair share = ceil(partitions / workers), clamped to [MinPartitionCount, MaxPartitionCount]
        // when those options are set (> 0).
        int target = 1;
        if (partitionCount > workerCount)
        {
            target = (int)Math.Ceiling((double)partitionCount / (double)workerCount);
        }

        Debug.Assert(this.options.MinPartitionCount <= this.options.MaxPartitionCount);
        if (this.options.MaxPartitionCount > 0 && target > this.options.MaxPartitionCount)
        {
            target = this.options.MaxPartitionCount;
        }

        if (this.options.MinPartitionCount > 0 && target < this.options.MinPartitionCount)
        {
            target = this.options.MinPartitionCount;
        }

        int myCount = workerToPartitionCount[this.workerName];
        int partitionsNeededForMe = target - myCount;
        TraceLog.Informational(
            string.Format(
                "Host '{0}' {1} partitions, {2} hosts, {3} available leases, target = {4}, min = {5}, max = {6}, mine = {7}, will try to take {8} lease(s) for myself'.",
                this.workerName,
                partitionCount,
                workerCount,
                expiredLeases.Count,
                target,
                this.options.MinPartitionCount,
                this.options.MaxPartitionCount,
                myCount,
                Math.Max(partitionsNeededForMe, 0)));

        if (partitionsNeededForMe > 0)
        {
            if (expiredLeases.Count > 0)
            {
                // Prefer free/expired leases; acquisition can race with other hosts, so failures
                // (TryAcquireLeaseAsync returning null) are simply skipped.
                foreach (T leaseToTake in expiredLeases)
                {
                    if (partitionsNeededForMe == 0)
                    {
                        break;
                    }

                    TraceLog.Informational(string.Format("Host '{0}' attempting to take lease for PartitionId '{1}'.", this.workerName, leaseToTake.PartitionId));
                    T acquiredLease = await this.TryAcquireLeaseAsync(leaseToTake);
                    if (acquiredLease != null)
                    {
                        TraceLog.Informational(string.Format("Host '{0}' successfully acquired lease for PartitionId '{1}': {2}", this.workerName, leaseToTake.PartitionId, acquiredLease));
                        takenLeases.Add(acquiredLease.PartitionId, acquiredLease);
                        partitionsNeededForMe--;
                    }
                }
            }
            else
            {
                // No free leases: find the most loaded host as the steal candidate.
                KeyValuePair<string, int> workerToStealFrom = default(KeyValuePair<string, int>);
                foreach (var kvp in workerToPartitionCount)
                {
                    // Bug fix: the original tested kvp.Equals(default(...)) — a condition on the
                    // *current entry* that is (almost) never true. The intent is "no candidate
                    // selected yet", which must be tested on workerToStealFrom.
                    if (workerToStealFrom.Equals(default(KeyValuePair<string, int>)) || workerToStealFrom.Value < kvp.Value)
                    {
                        workerToStealFrom = kvp;
                    }
                }

                // Only steal when the candidate is above target (allowing one extra when we need
                // more than one, to avoid endless back-and-forth stealing).
                if (workerToStealFrom.Value > target - (partitionsNeededForMe > 1 ? 1 : 0))
                {
                    foreach (var kvp in allPartitions)
                    {
                        if (string.Equals(kvp.Value.Owner, workerToStealFrom.Key, StringComparison.OrdinalIgnoreCase))
                        {
                            T leaseToTake = kvp.Value;
                            TraceLog.Informational(string.Format("Host '{0}' attempting to steal lease from '{1}' for PartitionId '{2}'.", this.workerName, workerToStealFrom.Key, leaseToTake.PartitionId));
                            T stolenLease = await this.TryStealLeaseAsync(leaseToTake);
                            if (stolenLease != null)
                            {
                                TraceLog.Informational(string.Format("Host '{0}' stole lease from '{1}' for PartitionId '{2}'.", this.workerName, workerToStealFrom.Key, leaseToTake.PartitionId));
                                takenLeases.Add(stolenLease.PartitionId, stolenLease);
                                partitionsNeededForMe--;

                                // Only steal one lease at a time
                                break;
                            }
                        }
                    }
                }
            }
        }
    }

    return (takenLeases);
}
/// <summary>
/// Called when this host acquires a lease for a partition. Starts a dedicated worker task that
/// opens the observer, reads the partition's change feed in a loop, dispatches batches, handles
/// partition splits (410 Gone / PartitionKeyRangeGone), and optionally auto-checkpoints.
/// Returns synchronously (completed task); the worker runs in the background until shutdown,
/// lease loss/gone, or an unrecoverable error.
/// </summary>
/// <param name="lease">The acquired lease; must be non-null with a non-empty Owner.</param>
Task IPartitionObserver<DocumentServiceLease>.OnPartitionAcquiredAsync(DocumentServiceLease lease)
{
    Debug.Assert(lease != null && !string.IsNullOrEmpty(lease.Owner), "lease");
    TraceLog.Informational(string.Format("Host '{0}' partition {1}: acquired!", this.HostName, lease.PartitionId));
#if DEBUG
    Interlocked.Increment(ref this.partitionCount);
#endif

    IChangeFeedObserver observer = this.observerFactory.CreateObserver();
    ChangeFeedObserverContext context = new ChangeFeedObserverContext(lease.PartitionId, this);
    CancellationTokenSource cancellation = new CancellationTokenSource();
    WorkerData workerData = null;
    // Handshake: the worker blocks on this event until workerData is constructed and registered
    // below, so the worker never observes workerData == null.
    ManualResetEvent workerTaskOkToStart = new ManualResetEvent(false);

    // Create ChangeFeedOptions to use for this worker.
    // Copied per worker because PartitionKeyRangeId/RequestContinuation are mutated below.
    ChangeFeedOptions options = new ChangeFeedOptions
    {
        MaxItemCount = this.changeFeedOptions.MaxItemCount,
        PartitionKeyRangeId = this.changeFeedOptions.PartitionKeyRangeId,
        SessionToken = this.changeFeedOptions.SessionToken,
        StartFromBeginning = this.changeFeedOptions.StartFromBeginning,
        StartTime = this.changeFeedOptions.StartTime,
        RequestContinuation = this.changeFeedOptions.RequestContinuation
    };

    Task workerTask = Task.Run(async () =>
    {
        // Reason the worker stopped; null means "no close reason determined yet".
        ChangeFeedObserverCloseReason? closeReason = null;
        try
        {
            TraceLog.Verbose(string.Format("Worker task waiting for start signal: partition '{0}'", lease.PartitionId));

            // NOTE: blocking wait inside the task; released by workerTaskOkToStart.Set() below.
            workerTaskOkToStart.WaitOne();

            Debug.Assert(workerData != null);
            TraceLog.Verbose(string.Format("Worker task started: partition '{0}'", lease.PartitionId));

            try
            {
                await observer.OpenAsync(context);
            }
            catch (Exception ex)
            {
                TraceLog.Error(string.Format("IChangeFeedObserver.OpenAsync exception: {0}", ex));
                closeReason = ChangeFeedObserverCloseReason.ObserverError;
                throw;
            }

            options.PartitionKeyRangeId = lease.PartitionId;
            // Resume from the lease's checkpointed continuation when one exists.
            if (!string.IsNullOrEmpty(lease.ContinuationToken))
            {
                options.RequestContinuation = lease.ContinuationToken;
            }

            CheckpointStats checkpointStats = null;
            if (!this.statsSinceLastCheckpoint.TryGetValue(lease.PartitionId, out checkpointStats) || checkpointStats == null)
            {
                // It could be that the lease was created by different host and we picked it up.
                checkpointStats = this.statsSinceLastCheckpoint.AddOrUpdate(
                    lease.PartitionId,
                    new CheckpointStats(),
                    (partitionId, existingStats) => existingStats);
                Trace.TraceWarning(string.Format("Added stats for partition '{0}' for which the lease was picked up after the host was started.", lease.PartitionId));
            }

            IDocumentQuery<Document> query = this.documentClient.CreateDocumentChangeFeedQuery(this.collectionSelfLink, options);

            TraceLog.Verbose(string.Format("Worker start: partition '{0}', continuation '{1}'", lease.PartitionId, lease.ContinuationToken));

            // Tracks the last successfully observed continuation; needed for split handling,
            // where the failed request has no response to read it from.
            string lastContinuation = options.RequestContinuation;

            while (this.isShutdown == 0)
            {
                // Inner loop: drain the feed until no more results, then poll after a delay.
                do
                {
                    // Captured instead of rethrown immediately so the status/substatus can be
                    // inspected here and the original stack preserved via Throw().
                    ExceptionDispatchInfo exceptionDispatchInfo = null;
                    FeedResponse<Document> response = null;
                    try
                    {
                        response = await query.ExecuteNextAsync<Document>();
                        lastContinuation = response.ResponseContinuation;
                    }
                    catch (DocumentClientException ex)
                    {
                        exceptionDispatchInfo = ExceptionDispatchInfo.Capture(ex);
                    }

                    if (exceptionDispatchInfo != null)
                    {
                        DocumentClientException dcex = (DocumentClientException)exceptionDispatchInfo.SourceException;

                        if (StatusCode.NotFound == (StatusCode)dcex.StatusCode && SubStatusCode.ReadSessionNotAvailable != (SubStatusCode)GetSubStatusCode(dcex))
                        {
                            // Most likely, the database or collection was removed while we were enumerating.
                            // Shut down. The user will need to start over.
                            // Note: this has to be a new task, can't await for shutdown here, as shutdown awaits for all worker tasks.
                            TraceLog.Error(string.Format("Partition {0}: resource gone (subStatus={1}). Aborting.", context.PartitionKeyRangeId, GetSubStatusCode(dcex)));
                            await Task.Factory.StartNew(() => this.StopAsync(ChangeFeedObserverCloseReason.ResourceGone));
                            break;
                        }
                        else if (StatusCode.Gone == (StatusCode)dcex.StatusCode)
                        {
                            SubStatusCode subStatusCode = (SubStatusCode)GetSubStatusCode(dcex);
                            if (SubStatusCode.PartitionKeyRangeGone == subStatusCode)
                            {
                                // The partition was split; create child leases from lastContinuation,
                                // then drop this worker's lease via LeaseLostException(isGone: true).
                                bool isSuccess = await HandleSplitAsync(context.PartitionKeyRangeId, lastContinuation, lease.Id);
                                if (!isSuccess)
                                {
                                    TraceLog.Error(string.Format("Partition {0}: HandleSplit failed! Aborting.", context.PartitionKeyRangeId));
                                    await Task.Factory.StartNew(() => this.StopAsync(ChangeFeedObserverCloseReason.ResourceGone));
                                    break;
                                }

                                // Throw LeaseLostException so that we take the lease down.
                                throw new LeaseLostException(lease, exceptionDispatchInfo.SourceException, true);
                            }
                            else if (SubStatusCode.Splitting == subStatusCode)
                            {
                                // Transient state while the split is in progress; retry after delay.
                                TraceLog.Warning(string.Format("Partition {0} is splitting. Will retry to read changes until split finishes. {1}", context.PartitionKeyRangeId, dcex.Message));
                            }
                            else
                            {
                                exceptionDispatchInfo.Throw();
                            }
                        }
                        else if (StatusCode.TooManyRequests == (StatusCode)dcex.StatusCode || StatusCode.ServiceUnavailable == (StatusCode)dcex.StatusCode)
                        {
                            TraceLog.Warning(string.Format("Partition {0}: retriable exception : {1}", context.PartitionKeyRangeId, dcex.Message));
                        }
                        else
                        {
                            exceptionDispatchInfo.Throw();
                        }

                        // Back off: honor the server's RetryAfter when provided, else use the configured poll delay.
                        await Task.Delay(dcex.RetryAfter != TimeSpan.Zero ? dcex.RetryAfter : this.options.FeedPollDelay, cancellation.Token);
                    }

                    if (response != null)
                    {
                        if (response.Count > 0)
                        {
                            List<Document> docs = new List<Document>();
                            docs.AddRange(response);
                            try
                            {
                                context.FeedResponse = response;
                                await observer.ProcessChangesAsync(context, docs);
                            }
                            catch (Exception ex)
                            {
                                TraceLog.Error(string.Format("IChangeFeedObserver.ProcessChangesAsync exception: {0}", ex));
                                closeReason = ChangeFeedObserverCloseReason.ObserverError;
                                throw;
                            }
                            finally
                            {
                                // Clear so the response is not retained past this batch.
                                context.FeedResponse = null;
                            }
                        }

                        checkpointStats.ProcessedDocCount += (uint)response.Count;

                        if (this.options.IsAutoCheckpointEnabled)
                        {
                            if (IsCheckpointNeeded(lease, checkpointStats))
                            {
                                // CheckpointAsync returns the updated lease; keep workerData in sync.
                                lease = workerData.Lease = await this.CheckpointAsync(lease, response.ResponseContinuation, context);
                                checkpointStats.Reset();
                            }
                            else if (response.Count > 0)
                            {
                                TraceLog.Informational(string.Format("Checkpoint: not checkpointing for partition {0}, {1} docs, new continuation '{2}' as frequency condition is not met", lease.PartitionId, response.Count, response.ResponseContinuation));
                            }
                        }
                    }
                }
                while (query.HasMoreResults && this.isShutdown == 0);

                if (this.isShutdown == 0)
                {
                    await Task.Delay(this.options.FeedPollDelay, cancellation.Token);
                }
            } // Outer while (this.isShutdown == 0) loop.

            closeReason = ChangeFeedObserverCloseReason.Shutdown;
        }
        catch (LeaseLostException ex)
        {
            // IsGone distinguishes "lease gone due to split" from "lease taken by another host".
            closeReason = ex.IsGone ? ChangeFeedObserverCloseReason.LeaseGone : ChangeFeedObserverCloseReason.LeaseLost;
        }
        catch (TaskCanceledException ex)
        {
            if (cancellation.IsCancellationRequested || this.isShutdown != 0)
            {
                // Expected path when cancellation.Token fires during one of the delays above.
                TraceLog.Informational(string.Format("Cancel signal received for partition {0} worker!", context.PartitionKeyRangeId));
                if (!closeReason.HasValue)
                {
                    closeReason = ChangeFeedObserverCloseReason.Shutdown;
                }
            }
            else
            {
                // Cancellation without a shutdown/cancel request is unexpected; flag it.
                TraceLog.Warning(string.Format("Partition {0}: got task cancelled exception in non-shutdown scenario [cancellation={1}, isShutdown={2}], {3}", context.PartitionKeyRangeId, cancellation.IsCancellationRequested, this.isShutdown, ex.StackTrace));
                if (!closeReason.HasValue)
                {
                    closeReason = ChangeFeedObserverCloseReason.Unknown;
                }
            }
        }
        catch (Exception ex)
        {
            TraceLog.Error(string.Format("Partition {0} exception: {1}", context.PartitionKeyRangeId, ex));
            if (!closeReason.HasValue)
            {
                closeReason = ChangeFeedObserverCloseReason.Unknown;
            }
        }

        if (closeReason.HasValue)
        {
            TraceLog.Informational(string.Format("Releasing lease for partition {0} due to an error, reason: {1}!", context.PartitionKeyRangeId, closeReason.Value));
            // Note: this has to be a new task, because OnPartitionReleasedAsync awaits for worker task.
            await Task.Factory.StartNew(async () => await this.partitionManager.TryReleasePartitionAsync(context.PartitionKeyRangeId, true, closeReason.Value));
        }

        TraceLog.Informational(string.Format("Partition {0}: worker finished!", context.PartitionKeyRangeId));
    });

    // Register the worker, then release the handshake so the worker can proceed.
    workerData = new WorkerData(workerTask, observer, context, cancellation, lease);
    this.partitionKeyRangeIdToWorkerMap.AddOrUpdate(context.PartitionKeyRangeId, workerData, (string id, WorkerData d) => { return (workerData); });
    workerTaskOkToStart.Set();

    return (Task.FromResult(0));
}
/// <summary>
/// Background loop that renews leases for all owned partitions (and partitions in shutdown)
/// every LeaseRenewInterval until the host is stopped and shutdown completes. Partitions whose
/// lease renewal fails are removed (triggering their worker shutdown).
/// </summary>
async Task LeaseRenewer()
{
    while (this.isStarted == 1 || !this.shutdownComplete)
    {
        try
        {
            TraceLog.Informational(string.Format("Host '{0}' starting renewal of Leases.", this.workerName));

            ConcurrentBag<T> renewedLeases = new ConcurrentBag<T>();
            ConcurrentBag<T> failedToRenewLeases = new ConcurrentBag<T>();
            List<Task> renewTasks = new List<Task>();

            // Renew leases for all currently owned partitions in parallel.
            foreach (T lease in this.currentlyOwnedPartitions.Values)
            {
                renewTasks.Add(this.RenewLeaseAsync(lease).ContinueWith(renewResult =>
                {
                    if (renewResult.Result != null)
                    {
                        renewedLeases.Add(renewResult.Result);
                    }
                    else
                    {
                        // Keep track of all failed attempts to renew so we can trigger shutdown for these partitions.
                        failedToRenewLeases.Add(lease);
                    }
                }));
            }

            // Renew leases for all partitions currently in shutdown.
            // NOTE(review): this List is appended to from parallel continuations; a concurrent
            // collection would be safer — confirm whether renewals can truly overlap here.
            List<T> failedToRenewShutdownLeases = new List<T>();
            foreach (T shutdownLeases in this.keepRenewingDuringClose.Values)
            {
                renewTasks.Add(this.RenewLeaseAsync(shutdownLeases).ContinueWith(renewResult =>
                {
                    if (renewResult.Result != null)
                    {
                        renewedLeases.Add(renewResult.Result);
                    }
                    else
                    {
                        // Keep track of all failed attempts to renew shutdown leases so we can remove them from further renew attempts.
                        failedToRenewShutdownLeases.Add(shutdownLeases);
                    }
                }));
            }

            // Wait for all renews to complete.
            await Task.WhenAll(renewTasks.ToArray());

            // Update renewed leases.
            foreach (T lease in renewedLeases)
            {
                bool updateResult = this.currentlyOwnedPartitions.TryUpdate(lease.PartitionId, lease, lease);
                if (!updateResult)
                {
                    TraceLog.Warning(string.Format("Host '{0}' Renewed lease {1} but failed to update it in the map (ignorable).", this.workerName, lease));
                }
            }

            // Trigger shutdown of all partitions we failed to renew leases for.
            // Bug fix: the original used Parallel.ForEach with an async lambda. That compiles to an
            // async-void delegate, so Parallel.ForEach returned before the removals completed and
            // any exception from RemoveLeaseAsync was unobservable (potentially crashing the
            // process). Collect the tasks and await them instead; failures now surface in the
            // catch (Exception) handler below.
            List<Task> removeTasks = new List<Task>();
            foreach (T failedLease in failedToRenewLeases)
            {
                removeTasks.Add(this.RemoveLeaseAsync(failedLease, false));
            }

            await Task.WhenAll(removeTasks);

            // Now remove all failed renewals of shutdown leases from further renewals.
            foreach (T failedToRenewShutdownLease in failedToRenewShutdownLeases)
            {
                T removedLease = null;
                this.keepRenewingDuringClose.TryRemove(failedToRenewShutdownLease.PartitionId, out removedLease);
            }

            await Task.Delay(this.options.LeaseRenewInterval, this.leaseRenewerCancellationTokenSource.Token);
        }
        catch (OperationCanceledException)
        {
            // Delay was cancelled during shutdown; loop condition decides whether to keep renewing.
            TraceLog.Informational(string.Format("Host '{0}' Renewer task canceled.", this.workerName));
        }
        catch (Exception ex)
        {
            // Best-effort loop: log and keep renewing on the next interval.
            TraceLog.Exception(ex);
        }
    }

    this.currentlyOwnedPartitions.Clear();
    this.keepRenewingDuringClose.Clear();
    TraceLog.Informational(string.Format("Host '{0}' Renewer task completed.", this.workerName));
}