public override bool Update(int timeout)
{
    // Block until the producer signals that at least one slice is queued,
    // or the timeout lapses.
    if (!this.mDataAvailableEvent.WaitOne(timeout, false))
    {
        return false;
    }

    DateTime now = DateTime.Now;
    if (this.mDataQueue.Count == 0)
    {
        return false;
    }

    lock (this.mDataQueue)
    {
        // If we have fallen behind, keep dequeuing stale slices (older than
        // SkipThreshold) so playback catches up with real time.
        DataSlice slice;
        do
        {
            slice = this.mDataQueue.Dequeue();
        }
        while (now - slice.Timestamp > SkipThreshold && this.mDataQueue.Count > 0);

        this.mData = slice;

        // Queue drained — require a fresh Set() before the next update.
        if (this.mDataQueue.Count == 0)
        {
            this.mDataAvailableEvent.Reset();
        }
    }
    return true;
}
/// <summary>
/// Wraps an existing SDK <c>DataSlice</c> instance.
/// </summary>
/// <param name="dataSlice">The slice to wrap; must not be null.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="dataSlice"/> is null.</exception>
public PSDataSlice(DataSlice dataSlice)
{
    if (dataSlice == null)
    {
        // nameof keeps the parameter name refactor-safe (same runtime string).
        throw new ArgumentNullException(nameof(dataSlice));
    }
    this.dataSlice = dataSlice;
}
/// <summary>
/// Returns the serialized JSON for one page of distinct members of
/// <paramref name="field"/> in the data set identified by
/// <paramref name="index"/>. All pages are computed once per
/// (index, field) pair and cached; the requested page is then indexed
/// out of the cached array.
/// </summary>
/// <param name="index">Identifier of the data set to load.</param>
/// <param name="page">Zero-based page number to return.</param>
/// <param name="field">Field whose distinct members are listed.</param>
private async Task<string> GetMembers(string index, int page, FieldModel field)
{
    return (await _cache.GetOrCreateAsync(index + field.UniqueName, async (cacheEntry) =>
    {
        cacheEntry.SetSize(1);
        JsonSerializerOptions options = new JsonSerializerOptions
        {
            Converters = { new MembersResponseJsonConverter() }
        };

        IDataStructure data = await LoadData(index);
        var namesAndTypes = data.GetNameAndTypes();
        DataSlice dataSlice = new DataSlice(data);
        List<object> members = null;
        bool sorted = false;

        if (namesAndTypes[field.UniqueName] == ColumnType.stringType)
        {
            // FIX: read the column off the local 'dataSlice' instance, not the
            // DataSlice type; and rename the lambda parameter so it no longer
            // shadows the method's 'index' parameter (compile error CS0136).
            var column = dataSlice.Data.GetColumn<string>(field.UniqueName);
            var stringMembers = dataSlice.DataColumnIndexes
                .Select(rowIndex => column[rowIndex])
                .Distinct()
                .ToList();
            if (stringMembers.Count != 0)
            {
                // Month-like values get calendar ordering instead of lexicographic.
                var first = stringMembers.First();
                if (Enum.TryParse(first.ToString(), out Month m) || Enum.TryParse(first.ToString(), out ShortMonth m1))
                {
                    stringMembers.Sort(new MonthComparator<string>());
                    sorted = true;
                }
            }
            members = stringMembers.ConvertAll<object>(str => (object)str);
        }
        else
        {
            var column = dataSlice.Data.GetColumn<double?>(field.UniqueName);
            members = dataSlice.DataColumnIndexes
                .Select(rowIndex => column[rowIndex] as object)
                .Distinct()
                .ToList();
        }

        // Always emit at least one (possibly empty) page.
        int pageTotal = (int)Math.Ceiling(members.Count / (double)MEMBERS_PAGE_SIZE);
        pageTotal = pageTotal == 0 ? 1 : pageTotal;
        string[] responses = new string[pageTotal];
        int currentPage = 0;
        while (currentPage < pageTotal)
        {
            MembersResponse response = new MembersResponse();
            response.Sorted = sorted;
            response.Page = currentPage;
            response.PageTotal = pageTotal;
            int from = currentPage * MEMBERS_PAGE_SIZE;
            int to = Math.Min(members.Count, from + MEMBERS_PAGE_SIZE);
            for (int i = from; i < to; i++)
            {
                response.Members.Add(members[i]);
            }
            responses[currentPage] = JsonSerializer.Serialize(response, options);
            currentPage++;
        }
        return responses;
    }))[page];
}
private void RecreateCluster(DataSlice slice)
{
    // Nothing to restore when the slice carries no payload.
    if (slice.Data == null)
    {
        return;
    }

    _entity = (CsBackupableEntities)CompactBinaryFormatter.FromByteBuffer(slice.Data, string.Empty);

    // Only configuration slices trigger a cluster restore.
    if (slice.SliceHeader.ContentType == DataSliceType.Config && operationHandler != null)
    {
        operationHandler.Restore(_entity, _database, Cluster);
    }
}
private void OnDataAvailable(float[][] data, float[][] spectrum)
{
    // Cap the backlog at ~5 seconds of slices (60/sec). Some codec decoders
    // hand us large chunks all at once; beyond this we just drop input.
    const int maxQueuedSlices = 60 * 5;
    if (this.mDataQueue.Count >= maxQueuedSlices)
    {
        return;
    }

    // Large chunks arrive faster than real time, so space timestamps at
    // least one stride apart: if the last slice was less than 1/60 s ago,
    // use lastSlice + stride instead of "now". Otherwise the consumer's
    // stale-slice dropping would discard most of the chunk as old.
    DateTime stamp = DateTime.Now;
    DateTime expected = this.mLastSlice + SliceStride;
    if (stamp < expected)
    {
        stamp = expected;
    }

    DataSlice slice = new DataSlice(
        (float)this.mSource.Position / 1000,
        this.mSource.CurrentTrack.DisplayTrackTitle,
        data,
        spectrum,
        stamp);
    this.mLastSlice = stamp;

    lock (this.mDataQueue)
    {
        this.mDataQueue.Enqueue(slice);
    }
    this.mDataAvailableEvent.Set();
}
/// <summary>
/// Gets the first page of data slice instances with the link to the
/// next page.
/// </summary>
/// <param name='resourceGroupName'>
/// Required. The resource group name of the data factory.
/// </param>
/// <param name='dataFactoryName'>
/// Required. A unique data factory instance name.
/// </param>
/// <param name='tableName'>
/// Required. A unique table instance name.
/// </param>
/// <param name='parameters'>
/// Required. Parameters specifying how to list data slices of the
/// table.
/// </param>
/// <param name='cancellationToken'>
/// Cancellation token.
/// </param>
/// <returns>
/// The List data slices operation response.
/// </returns>
public async Task<DataSliceListResponse> ListAsync(string resourceGroupName, string dataFactoryName, string tableName, DataSliceListParameters parameters, CancellationToken cancellationToken)
{
    // Validate arguments against the service's resource-naming constraints.
    if (resourceGroupName == null)
    {
        throw new ArgumentNullException("resourceGroupName");
    }
    if (resourceGroupName.Length > 1000)
    {
        throw new ArgumentOutOfRangeException("resourceGroupName");
    }
    if (!Regex.IsMatch(resourceGroupName, "^[-\\w\\._\\(\\)]+$"))
    {
        throw new ArgumentOutOfRangeException("resourceGroupName");
    }
    if (dataFactoryName == null)
    {
        throw new ArgumentNullException("dataFactoryName");
    }
    if (dataFactoryName.Length > 63)
    {
        throw new ArgumentOutOfRangeException("dataFactoryName");
    }
    if (!Regex.IsMatch(dataFactoryName, "^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$"))
    {
        throw new ArgumentOutOfRangeException("dataFactoryName");
    }
    if (tableName == null)
    {
        throw new ArgumentNullException("tableName");
    }
    if (tableName.Length > 260)
    {
        throw new ArgumentOutOfRangeException("tableName");
    }
    if (!Regex.IsMatch(tableName, "^[A-Za-z0-9_][^<>*#.%&:\\\\+?/]*$"))
    {
        throw new ArgumentOutOfRangeException("tableName");
    }
    if (parameters == null)
    {
        throw new ArgumentNullException("parameters");
    }
    if (parameters.DataSliceRangeEndTime == null)
    {
        throw new ArgumentNullException("parameters.DataSliceRangeEndTime");
    }
    if (parameters.DataSliceRangeStartTime == null)
    {
        throw new ArgumentNullException("parameters.DataSliceRangeStartTime");
    }

    // Tracing
    bool shouldTrace = TracingAdapter.IsEnabled;
    string invocationId = null;
    if (shouldTrace)
    {
        invocationId = TracingAdapter.NextInvocationId.ToString();
        Dictionary<string, object> tracingParameters = new Dictionary<string, object>();
        tracingParameters.Add("resourceGroupName", resourceGroupName);
        tracingParameters.Add("dataFactoryName", dataFactoryName);
        tracingParameters.Add("tableName", tableName);
        tracingParameters.Add("parameters", parameters);
        TracingAdapter.Enter(invocationId, this, "ListAsync", tracingParameters);
    }

    // Construct the relative request URL.
    string url = "/subscriptions/";
    if (this.Client.Credentials.SubscriptionId != null)
    {
        url += Uri.EscapeDataString(this.Client.Credentials.SubscriptionId);
    }
    url += "/resourcegroups/" + Uri.EscapeDataString(resourceGroupName);
    url += "/providers/Microsoft.DataFactory/datafactories/" + Uri.EscapeDataString(dataFactoryName);
    url += "/datasets/" + Uri.EscapeDataString(tableName);
    url += "/slices";
    List<string> queryParameters = new List<string>();
    queryParameters.Add("start=" + Uri.EscapeDataString(parameters.DataSliceRangeStartTime));
    queryParameters.Add("end=" + Uri.EscapeDataString(parameters.DataSliceRangeEndTime));
    queryParameters.Add("api-version=2015-09-01");
    if (queryParameters.Count > 0)
    {
        url += "?" + string.Join("&", queryParameters);
    }

    // Join base address and relative path with exactly one '/' separator.
    string baseUrl = this.Client.BaseUri.AbsoluteUri;
    if (baseUrl[baseUrl.Length - 1] == '/')
    {
        baseUrl = baseUrl.Substring(0, baseUrl.Length - 1);
    }
    if (url[0] == '/')
    {
        url = url.Substring(1);
    }
    url = baseUrl + "/" + url;
    url = url.Replace(" ", "%20");

    // Create HTTP transport objects
    HttpRequestMessage httpRequest = null;
    try
    {
        httpRequest = new HttpRequestMessage();
        httpRequest.Method = HttpMethod.Get;
        httpRequest.RequestUri = new Uri(url);

        // Set Headers
        httpRequest.Headers.Add("x-ms-client-request-id", Guid.NewGuid().ToString());

        // Set Credentials
        cancellationToken.ThrowIfCancellationRequested();
        await this.Client.Credentials.ProcessHttpRequestAsync(httpRequest, cancellationToken).ConfigureAwait(false);

        // Send Request
        HttpResponseMessage httpResponse = null;
        try
        {
            if (shouldTrace)
            {
                TracingAdapter.SendRequest(invocationId, httpRequest);
            }
            cancellationToken.ThrowIfCancellationRequested();
            httpResponse = await this.Client.HttpClient.SendAsync(httpRequest, cancellationToken).ConfigureAwait(false);
            if (shouldTrace)
            {
                TracingAdapter.ReceiveResponse(invocationId, httpResponse);
            }
            HttpStatusCode statusCode = httpResponse.StatusCode;
            if (statusCode != HttpStatusCode.OK)
            {
                cancellationToken.ThrowIfCancellationRequested();
                CloudException ex = CloudException.Create(httpRequest, null, httpResponse, await httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false));
                if (shouldTrace)
                {
                    TracingAdapter.Error(invocationId, ex);
                }
                throw ex;
            }

            // Deserialize the 200 response body into the result object.
            DataSliceListResponse result = null;
            if (statusCode == HttpStatusCode.OK)
            {
                cancellationToken.ThrowIfCancellationRequested();
                string responseContent = await httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
                result = new DataSliceListResponse();
                JToken responseDoc = null;
                if (!string.IsNullOrEmpty(responseContent))
                {
                    responseDoc = JToken.Parse(responseContent);
                }
                if (responseDoc != null && responseDoc.Type != JTokenType.Null)
                {
                    JToken valueArray = responseDoc["value"];
                    if (valueArray != null && valueArray.Type != JTokenType.Null)
                    {
                        foreach (JToken sliceToken in ((JArray)valueArray))
                        {
                            DataSlice slice = new DataSlice();
                            result.DataSlices.Add(slice);

                            JToken startValue = sliceToken["start"];
                            if (startValue != null && startValue.Type != JTokenType.Null)
                            {
                                slice.Start = ((DateTime)startValue);
                            }
                            JToken endValue = sliceToken["end"];
                            if (endValue != null && endValue.Type != JTokenType.Null)
                            {
                                slice.End = ((DateTime)endValue);
                            }
                            JToken stateValue = sliceToken["state"];
                            if (stateValue != null && stateValue.Type != JTokenType.Null)
                            {
                                slice.State = ((string)stateValue);
                            }
                            JToken substateValue = sliceToken["substate"];
                            if (substateValue != null && substateValue.Type != JTokenType.Null)
                            {
                                slice.Substate = ((string)substateValue);
                            }
                            JToken latencyStatusValue = sliceToken["latencyStatus"];
                            if (latencyStatusValue != null && latencyStatusValue.Type != JTokenType.Null)
                            {
                                slice.LatencyStatus = ((string)latencyStatusValue);
                            }
                            JToken retryCountValue = sliceToken["retryCount"];
                            if (retryCountValue != null && retryCountValue.Type != JTokenType.Null)
                            {
                                slice.RetryCount = ((int)retryCountValue);
                            }
                            JToken longRetryCountValue = sliceToken["longRetryCount"];
                            if (longRetryCountValue != null && longRetryCountValue.Type != JTokenType.Null)
                            {
                                slice.LongRetryCount = ((int)longRetryCountValue);
                            }
                        }
                    }
                    JToken nextLinkValue = responseDoc["nextLink"];
                    if (nextLinkValue != null && nextLinkValue.Type != JTokenType.Null)
                    {
                        result.NextLink = ((string)nextLinkValue);
                    }
                }
            }
            result.StatusCode = statusCode;
            if (httpResponse.Headers.Contains("x-ms-request-id"))
            {
                result.RequestId = httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
            }
            if (shouldTrace)
            {
                TracingAdapter.Exit(invocationId, result);
            }
            return result;
        }
        finally
        {
            if (httpResponse != null)
            {
                httpResponse.Dispose();
            }
        }
    }
    finally
    {
        if (httpRequest != null)
        {
            httpRequest.Dispose();
        }
    }
}
/// <summary>
/// Gets the next page of data slice instances with the link to the
/// next page.
/// </summary>
/// <param name='nextLink'>
/// Required. The url to the next data slices page.
/// </param>
/// <param name='cancellationToken'>
/// Cancellation token.
/// </param>
/// <returns>
/// The List data slices operation response.
/// </returns>
public async Task<DataSliceListResponse> ListNextAsync(string nextLink, CancellationToken cancellationToken)
{
    // Validate
    if (nextLink == null)
    {
        throw new ArgumentNullException("nextLink");
    }

    // Tracing
    bool shouldTrace = TracingAdapter.IsEnabled;
    string invocationId = null;
    if (shouldTrace)
    {
        invocationId = TracingAdapter.NextInvocationId.ToString();
        Dictionary<string, object> tracingParameters = new Dictionary<string, object>();
        tracingParameters.Add("nextLink", nextLink);
        TracingAdapter.Enter(invocationId, this, "ListNextAsync", tracingParameters);
    }

    // The service hands back a fully-qualified continuation URL;
    // only spaces need escaping before it is used verbatim.
    string url = nextLink;
    url = url.Replace(" ", "%20");

    // Create HTTP transport objects
    HttpRequestMessage httpRequest = null;
    try
    {
        httpRequest = new HttpRequestMessage();
        httpRequest.Method = HttpMethod.Get;
        httpRequest.RequestUri = new Uri(url);

        // Set Headers
        httpRequest.Headers.Add("x-ms-client-request-id", Guid.NewGuid().ToString());

        // Set Credentials
        cancellationToken.ThrowIfCancellationRequested();
        await this.Client.Credentials.ProcessHttpRequestAsync(httpRequest, cancellationToken).ConfigureAwait(false);

        // Send Request
        HttpResponseMessage httpResponse = null;
        try
        {
            if (shouldTrace)
            {
                TracingAdapter.SendRequest(invocationId, httpRequest);
            }
            cancellationToken.ThrowIfCancellationRequested();
            httpResponse = await this.Client.HttpClient.SendAsync(httpRequest, cancellationToken).ConfigureAwait(false);
            if (shouldTrace)
            {
                TracingAdapter.ReceiveResponse(invocationId, httpResponse);
            }
            HttpStatusCode statusCode = httpResponse.StatusCode;
            if (statusCode != HttpStatusCode.OK)
            {
                cancellationToken.ThrowIfCancellationRequested();
                CloudException ex = CloudException.Create(httpRequest, null, httpResponse, await httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false));
                if (shouldTrace)
                {
                    TracingAdapter.Error(invocationId, ex);
                }
                throw ex;
            }

            // Deserialize the 200 response body into the result object.
            DataSliceListResponse result = null;
            if (statusCode == HttpStatusCode.OK)
            {
                cancellationToken.ThrowIfCancellationRequested();
                string responseContent = await httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
                result = new DataSliceListResponse();
                JToken responseDoc = null;
                if (!string.IsNullOrEmpty(responseContent))
                {
                    responseDoc = JToken.Parse(responseContent);
                }
                if (responseDoc != null && responseDoc.Type != JTokenType.Null)
                {
                    JToken valueArray = responseDoc["value"];
                    if (valueArray != null && valueArray.Type != JTokenType.Null)
                    {
                        foreach (JToken sliceToken in ((JArray)valueArray))
                        {
                            DataSlice slice = new DataSlice();
                            result.DataSlices.Add(slice);

                            JToken startValue = sliceToken["start"];
                            if (startValue != null && startValue.Type != JTokenType.Null)
                            {
                                slice.Start = ((DateTime)startValue);
                            }
                            JToken endValue = sliceToken["end"];
                            if (endValue != null && endValue.Type != JTokenType.Null)
                            {
                                slice.End = ((DateTime)endValue);
                            }
                            JToken stateValue = sliceToken["state"];
                            if (stateValue != null && stateValue.Type != JTokenType.Null)
                            {
                                slice.State = ((string)stateValue);
                            }
                            JToken substateValue = sliceToken["substate"];
                            if (substateValue != null && substateValue.Type != JTokenType.Null)
                            {
                                slice.Substate = ((string)substateValue);
                            }
                            JToken latencyStatusValue = sliceToken["latencyStatus"];
                            if (latencyStatusValue != null && latencyStatusValue.Type != JTokenType.Null)
                            {
                                slice.LatencyStatus = ((string)latencyStatusValue);
                            }
                            JToken retryCountValue = sliceToken["retryCount"];
                            if (retryCountValue != null && retryCountValue.Type != JTokenType.Null)
                            {
                                slice.RetryCount = ((int)retryCountValue);
                            }
                            JToken longRetryCountValue = sliceToken["longRetryCount"];
                            if (longRetryCountValue != null && longRetryCountValue.Type != JTokenType.Null)
                            {
                                slice.LongRetryCount = ((int)longRetryCountValue);
                            }
                        }
                    }
                    JToken nextLinkValue = responseDoc["nextLink"];
                    if (nextLinkValue != null && nextLinkValue.Type != JTokenType.Null)
                    {
                        result.NextLink = ((string)nextLinkValue);
                    }
                }
            }
            result.StatusCode = statusCode;
            if (httpResponse.Headers.Contains("x-ms-request-id"))
            {
                result.RequestId = httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
            }
            if (shouldTrace)
            {
                TracingAdapter.Exit(invocationId, result);
            }
            return result;
        }
        finally
        {
            if (httpResponse != null)
            {
                httpResponse.Dispose();
            }
        }
    }
    finally
    {
        if (httpRequest != null)
        {
            httpRequest.Dispose();
        }
    }
}
/// <summary>
/// Backs up every collection of <c>Database</c>: streams each collection's
/// documents into fixed-capacity <c>DataSlice</c>s, pushes them onto the
/// shared persistence queue, then enqueues a final command slice and waits
/// for the writer to drain the queue. Progress and failures are reported
/// through <c>ExecutionStatus</c> / <c>ProgressHandler</c>.
/// </summary>
internal override void Run()
{
    // 1-based count of collections handled; feeds progress-percentage reporting.
    int collectionItterated = 1;
    LoggerManager.Instance.SetThreadContext(new LoggerContext()
    {
        ShardName = _context.LocalShardName != null ? _context.LocalShardName : "",
        DatabaseName = Database != null ? Database : ""
    });
    try
    {
        IDatabaseStore dbInstance = _context.DatabasesManager.GetDatabase(Database);
        foreach (string _collection in Collections)
        {
            if (((Storage.DatabaseStore)dbInstance).Collections.ContainsKey(_collection))
            {
                // Build a "select all" query; collection names containing a
                // double quote are wrapped in '$' instead so the query parses.
                Query defaultQuery = new Query();
                defaultQuery.QueryText = "Select * from ";
                if (_collection.Contains("\""))
                {
                    defaultQuery.QueryText += "$" + _collection + "$";
                }
                else
                {
                    defaultQuery.QueryText += "\"" + _collection + "\"";
                }

                ReadQueryOperation readQueryOperation = new ReadQueryOperation();
                readQueryOperation.Database = Database.ToString();
                readQueryOperation.Collection = _collection;
                readQueryOperation.Query = defaultQuery;

                ArrayList docList = new ArrayList();
                long currentSize = 0;
                DataSlice _activeSlice = PersistenceManager.ActiveContext.GetBackupFile(Database).CreateNewDataSlice();
                _activeSlice.SliceHeader.Collection = _collection;
                _activeSlice.SliceHeader.Database = Database;
                _activeSlice.SliceHeader.Cluster = Cluster;
                _activeSlice.SliceHeader.ContentType = DataSliceType.Data;
                try
                {
                    ReadQueryResponse readQueryResponse = (ReadQueryResponse)dbInstance.ExecuteReader(readQueryOperation);
                    if (!readQueryResponse.IsSuccessfull)
                    {
                        throw new Exception("Operation failed Error code: " + readQueryResponse.ErrorCode);
                    }

                    _dbReader = new CollectionReader((DataChunk)readQueryResponse.DataChunk, _context.TopologyImpl, Database, _collection);

                    // Pack documents into slices; when the active slice would
                    // overflow its capacity, flush it to the shared queue and
                    // start filling the next one.
                    while (_dbReader != null && _dbReader.ReadNext() && _dbReader.GetDocument() != null)
                    {
                        IJSONDocument _doc = _dbReader.GetDocument();
                        if (currentSize + _doc.Size <= _activeSlice.Capcity)
                        {
                            docList.Add(_doc);
                            currentSize += _doc.Size + 2; // +2: per-document serialization overhead
                        }
                        else
                        {
                            DataSlice _nxtSlice = PersistenceManager.ActiveContext.GetBackupFile(Database).CreateNewDataSlice();
                            _nxtSlice.SliceHeader.Collection = _collection;
                            _nxtSlice.SliceHeader.Database = Database;
                            _nxtSlice.SliceHeader.Cluster = Cluster;
                            _nxtSlice.SliceHeader.ContentType = DataSliceType.Data;

                            _activeSlice.Data = CompactBinaryFormatter.ToByteBuffer(docList, string.Empty);
                            _activeSlice.SliceHeader.DataCount = docList.Count;
                            _activeSlice.SliceHeader.TotalSize = _activeSlice.Data.LongLength;
                            PersistenceManager.SharedQueue.Add(_activeSlice);

                            _activeSlice = _nxtSlice;
                            docList.Clear();
                            docList.Add(_doc);
                            currentSize = _doc.Size;
                        }
                    }
                    _dbReader.Dispose();

                    // Flush the final, partially-filled slice.
                    if (docList.Count > 0)
                    {
                        _activeSlice.Data = CompactBinaryFormatter.ToByteBuffer(docList, string.Empty);
                        _activeSlice.SliceHeader.DataCount = docList.Count;
                        _activeSlice.SliceHeader.TotalSize = _activeSlice.Data.LongLength;
                        PersistenceManager.SharedQueue.Add(_activeSlice);
                        docList.Clear();
                    }

                    ExecutionStatus.Status = RecoveryStatus.Executing;
                    // FIX: the original integer division truncated to 0 for every
                    // collection but the last; divide as double for a real fraction.
                    ExecutionStatus.PercentageExecution = collectionItterated / (double)Collections.Count;
                    ExecutionStatus.MessageTime = DateTime.Now;
                    ExecutionStatus.Message = "Completed Backup of " + Database + "_" + _collection + " : " + _collection;
                    collectionItterated++;
                    if (ProgressHandler != null)
                    {
                        ProgressHandler.SubmitRecoveryState(ExecutionStatus);
                    }
                }
                catch (Exception ex)
                {
                    if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
                    {
                        LoggerManager.Instance.RecoveryLogger.Error("DatabaseBackupJob.Run()", Database + " : " + ex.ToString());
                    }
                    collectionItterated--;
                }
            }
        }

        // Enqueue the command slice that tells the writer all data was queued.
        DataSlice finalSlice = new DataSlice(999999);
        finalSlice.SliceHeader.Collection = "Complete";
        finalSlice.SliceHeader.Database = Database;
        finalSlice.SliceHeader.Cluster = Cluster;
        finalSlice.SliceHeader.ContentType = DataSliceType.Command;
        finalSlice.Data = CompactBinaryFormatter.ToByteBuffer("Data_Complete_Adding", string.Empty);
        PersistenceManager.SharedQueue.Add(finalSlice);

        // Wait for the writer to drain the shared queue.
        // FIX: sleep between polls — the original empty busy-wait pinned a core.
        // TODO: add a timeout so a stalled consumer cannot hang this job forever.
        while (!PersistenceManager.SharedQueue.Consumed)
        {
            Thread.Sleep(100);
        }

        if (PersistenceManager.SharedQueue.Consumed)
        {
            ExecutionStatus.Status = RecoveryStatus.Completed;
            ExecutionStatus.PercentageExecution = 1;
            ExecutionStatus.MessageTime = DateTime.Now;
            ExecutionStatus.Message = "Completed Backup of " + Database;
        }
        else
        {
            ExecutionStatus.Status = RecoveryStatus.Failure;
            ExecutionStatus.PercentageExecution = collectionItterated / (double)Collections.Count;
            ExecutionStatus.MessageTime = DateTime.Now;
            ExecutionStatus.Message = "Failed Backup of " + Database;
        }
        if (ProgressHandler != null)
        {
            System.Threading.Tasks.Task.Factory.StartNew(() => ProgressHandler.SubmitRecoveryState(ExecutionStatus));
        }
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Info("DatabaseBackupJob.Run()", Database + "Completed");
        }
    }
    catch (ThreadAbortException)
    {
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsDebugEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Debug("DatabaseBackupJob.Run()", "Thread stopped");
        }
        Thread.ResetAbort();
    }
    catch (Exception exp)
    {
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Error("DatabaseBackupJob.Run()", Database + " : " + exp.ToString());
        }
        ExecutionStatus.Status = RecoveryStatus.Failure;
        ExecutionStatus.PercentageExecution = collectionItterated / (double)Collections.Count;
        ExecutionStatus.MessageTime = DateTime.Now;
        ExecutionStatus.Message = "Failed Backup of " + Database;
        if (ProgressHandler != null)
        {
            System.Threading.Tasks.Task.Factory.StartNew(() => ProgressHandler.SubmitRecoveryState(ExecutionStatus));
        }
    }
}
/// <summary>
/// Gets the first page of data slice instances with the link to the
/// next page.
/// </summary>
/// <param name='resourceGroupName'>
/// Required. The resource group name of the data factory.
/// </param>
/// <param name='dataFactoryName'>
/// Required. A unique data factory instance name.
/// </param>
/// <param name='tableName'>
/// Required. A unique table instance name.
/// </param>
/// <param name='dataSliceRangeStartTime'>
/// Required. The data slice range start time in round-trip ISO 8601
/// format.
/// </param>
/// <param name='dataSliceRangeEndTime'>
/// Required. The data slice range end time in round-trip ISO 8601
/// format.
/// </param>
/// <param name='cancellationToken'>
/// Cancellation token.
/// </param>
/// <returns>
/// The List data slices operation response.
/// </returns>
public async Task<DataSliceListResponse> ListAsync(string resourceGroupName, string dataFactoryName, string tableName, string dataSliceRangeStartTime, string dataSliceRangeEndTime, CancellationToken cancellationToken)
{
    // Validate
    if (resourceGroupName == null)
    {
        throw new ArgumentNullException("resourceGroupName");
    }
    if (dataFactoryName == null)
    {
        throw new ArgumentNullException("dataFactoryName");
    }
    if (tableName == null)
    {
        throw new ArgumentNullException("tableName");
    }
    if (dataSliceRangeStartTime == null)
    {
        throw new ArgumentNullException("dataSliceRangeStartTime");
    }
    if (dataSliceRangeEndTime == null)
    {
        throw new ArgumentNullException("dataSliceRangeEndTime");
    }

    // Tracing
    bool shouldTrace = CloudContext.Configuration.Tracing.IsEnabled;
    string invocationId = null;
    if (shouldTrace)
    {
        invocationId = Tracing.NextInvocationId.ToString();
        Dictionary<string, object> tracingParameters = new Dictionary<string, object>();
        tracingParameters.Add("resourceGroupName", resourceGroupName);
        tracingParameters.Add("dataFactoryName", dataFactoryName);
        tracingParameters.Add("tableName", tableName);
        tracingParameters.Add("dataSliceRangeStartTime", dataSliceRangeStartTime);
        tracingParameters.Add("dataSliceRangeEndTime", dataSliceRangeEndTime);
        Tracing.Enter(invocationId, this, "ListAsync", tracingParameters);
    }

    // Construct URL (2014-12-01-preview shape: /tables/ rather than /datasets/).
    string url = "/subscriptions/"
        + (this.Client.Credentials.SubscriptionId != null ? this.Client.Credentials.SubscriptionId.Trim() : "")
        + "/resourcegroups/" + resourceGroupName.Trim()
        + "/providers/Microsoft.DataFactory/datafactories/" + dataFactoryName.Trim()
        + "/tables/" + tableName.Trim()
        + "/slices?";
    url += "start=" + Uri.EscapeDataString(dataSliceRangeStartTime.Trim());
    url += "&end=" + Uri.EscapeDataString(dataSliceRangeEndTime.Trim());
    url += "&api-version=2014-12-01-preview";

    // Join base address and relative path with exactly one '/' separator.
    string baseUrl = this.Client.BaseUri.AbsoluteUri;
    if (baseUrl[baseUrl.Length - 1] == '/')
    {
        baseUrl = baseUrl.Substring(0, baseUrl.Length - 1);
    }
    if (url[0] == '/')
    {
        url = url.Substring(1);
    }
    url = baseUrl + "/" + url;
    url = url.Replace(" ", "%20");

    // Create HTTP transport objects
    HttpRequestMessage httpRequest = null;
    try
    {
        httpRequest = new HttpRequestMessage();
        httpRequest.Method = HttpMethod.Get;
        httpRequest.RequestUri = new Uri(url);

        // Set Headers
        httpRequest.Headers.Add("x-ms-client-request-id", Guid.NewGuid().ToString());

        // Set Credentials
        cancellationToken.ThrowIfCancellationRequested();
        await this.Client.Credentials.ProcessHttpRequestAsync(httpRequest, cancellationToken).ConfigureAwait(false);

        // Send Request
        HttpResponseMessage httpResponse = null;
        try
        {
            if (shouldTrace)
            {
                Tracing.SendRequest(invocationId, httpRequest);
            }
            cancellationToken.ThrowIfCancellationRequested();
            httpResponse = await this.Client.HttpClient.SendAsync(httpRequest, cancellationToken).ConfigureAwait(false);
            if (shouldTrace)
            {
                Tracing.ReceiveResponse(invocationId, httpResponse);
            }
            HttpStatusCode statusCode = httpResponse.StatusCode;
            if (statusCode != HttpStatusCode.OK)
            {
                cancellationToken.ThrowIfCancellationRequested();
                CloudException ex = CloudException.Create(httpRequest, null, httpResponse, await httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false));
                if (shouldTrace)
                {
                    Tracing.Error(invocationId, ex);
                }
                throw ex;
            }

            // Deserialize Response (reaching here implies a 200).
            DataSliceListResponse result = null;
            cancellationToken.ThrowIfCancellationRequested();
            string responseContent = await httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
            result = new DataSliceListResponse();
            JToken responseDoc = null;
            if (!string.IsNullOrEmpty(responseContent))
            {
                responseDoc = JToken.Parse(responseContent);
            }
            if (responseDoc != null && responseDoc.Type != JTokenType.Null)
            {
                JToken valueArray = responseDoc["value"];
                if (valueArray != null && valueArray.Type != JTokenType.Null)
                {
                    foreach (JToken sliceToken in ((JArray)valueArray))
                    {
                        DataSlice slice = new DataSlice();
                        result.DataSlices.Add(slice);

                        JToken startValue = sliceToken["start"];
                        if (startValue != null && startValue.Type != JTokenType.Null)
                        {
                            slice.Start = ((DateTime)startValue);
                        }
                        JToken endValue = sliceToken["end"];
                        if (endValue != null && endValue.Type != JTokenType.Null)
                        {
                            slice.End = ((DateTime)endValue);
                        }
                        JToken statusValue = sliceToken["status"];
                        if (statusValue != null && statusValue.Type != JTokenType.Null)
                        {
                            slice.Status = ((string)statusValue);
                        }
                        JToken latencyStatusValue = sliceToken["latencyStatus"];
                        if (latencyStatusValue != null && latencyStatusValue.Type != JTokenType.Null)
                        {
                            slice.LatencyStatus = ((string)latencyStatusValue);
                        }
                        JToken retryCountValue = sliceToken["retryCount"];
                        if (retryCountValue != null && retryCountValue.Type != JTokenType.Null)
                        {
                            slice.RetryCount = ((int)retryCountValue);
                        }
                        JToken longRetryCountValue = sliceToken["longRetryCount"];
                        if (longRetryCountValue != null && longRetryCountValue.Type != JTokenType.Null)
                        {
                            slice.LongRetryCount = ((int)longRetryCountValue);
                        }
                    }
                }
                JToken odatanextLinkValue = responseDoc["@odata.nextLink"];
                if (odatanextLinkValue != null && odatanextLinkValue.Type != JTokenType.Null)
                {
                    result.NextLink = ((string)odatanextLinkValue);
                }
            }
            result.StatusCode = statusCode;
            if (httpResponse.Headers.Contains("x-ms-request-id"))
            {
                result.RequestId = httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
            }
            if (shouldTrace)
            {
                Tracing.Exit(invocationId, result);
            }
            return result;
        }
        finally
        {
            if (httpResponse != null)
            {
                httpResponse.Dispose();
            }
        }
    }
    finally
    {
        if (httpRequest != null)
        {
            httpRequest.Dispose();
        }
    }
}
public PSDataSlice()
{
    // Back the wrapper with a fresh, empty SDK DataSlice.
    this.dataSlice = new DataSlice();
}
private void OnDataAvailable(float[][] data, float[][] spectrum)
{
    // Cap the backlog at ~5 seconds of slices (60/sec); codec decoders can
    // deliver large chunks all at once, and anything beyond this is dropped.
    if (dataQueue.Count >= 60 * 5)
        return;

    // Space timestamps at least one stride apart: if the previous slice was
    // under 1/60 s ago, stamp this one at lastSlice + stride instead of
    // "now", so the consumer's stale-slice dropping keeps the whole chunk.
    DateTime stamp = DateTime.Now;
    DateTime expected = lastSliceTime + sliceStride;
    if (stamp < expected)
        stamp = expected;

    DataSlice slice = new DataSlice(
        (float) source.Position / 1000,
        source.CurrentTrack.DisplayTrackTitle,
        data,
        spectrum,
        stamp);
    lastSliceTime = stamp;

    lock (dataQueue)
    {
        dataQueue.Enqueue(slice);
    }
    dataAvailableEvent.Set();
}
public override bool Update(int timeout)
{
    // Block until the producer signals queued data, or the timeout lapses.
    if (!dataAvailableEvent.WaitOne(timeout, false))
        return false;

    DateTime now = DateTime.Now;
    if (dataQueue.Count == 0)
        return false;

    lock (dataQueue)
    {
        // If we have fallen behind, discard slices older than the skip
        // threshold until we catch up (or the queue empties).
        DataSlice slice;
        do
        {
            slice = dataQueue.Dequeue();
        }
        while (now - slice.Timestamp > skipThreshold && dataQueue.Count > 0);

        dataSlice = slice;

        // Queue drained — require a fresh signal before the next update.
        if (dataQueue.Count == 0)
            dataAvailableEvent.Reset();
    }
    return true;
}
// Manages status and adds data to the target collection: deserializes the
// documents carried by a recovered DataSlice, inserts them via the topology,
// and retries documents reported as failed up to three times.  Progress is
// reported through ExecutionStatus; all failures are logged, never thrown.
private void InsertData(DataSlice slice)
{
    // List of collections that have been persisted; used for status.
    // NOTE(review): this list is local, so the Contains() check below is
    // always false within a single call — if progress is meant to be
    // tracked across slices this should be an instance field.  TODO confirm.
    List<string> collectionsPersisted = new List<string>();

    try {
        if (slice.Data != null) {
            // NOTE(review): the 'as ArrayList' cast yields null for any
            // other payload type, which would throw NRE at Cast() below —
            // confirm slices always carry an ArrayList.
            ArrayList documentsList = CompactBinaryFormatter.FromByteBuffer(slice.Data, string.Empty) as ArrayList;

            InsertDocumentsOperation insertOperation = new InsertDocumentsOperation();
            insertOperation.Documents = documentsList.Cast<IJSONDocument>().ToList();
            insertOperation.Database = Database;
            insertOperation.Collection = slice.SliceHeader.Collection;

            // Sanity check: restored document count should match the header.
            if (documentsList.Count != slice.SliceHeader.DataCount) {
                if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled) {
                    LoggerManager.Instance.RecoveryLogger.Info("DatabaseRecoveryJob.Run()",
                        "Document not same original: " + slice.SliceHeader.DataCount + " restored : " + documentsList.Count);
                }
            }

            // Retry loop: up to 3 attempts; after a partial failure only the
            // documents reported as failed are resubmitted.
            int i = 0;
            bool success = false;

            while (i < 3 && !success) {
                try {
                    if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled) {
                        LoggerManager.Instance.RecoveryLogger.Info("DatabaseRecoveryJob.Run()",
                            slice.SliceHeader.Collection + "_" + slice.SliceHeader.DataCount + " Insrt op" + i + " send at " + DateTime.Now);
                    }

                    InsertDocumentsResponse insertResponse = (InsertDocumentsResponse)_context.TopologyImpl.InsertDocuments(insertOperation);

                    if (!insertResponse.IsSuccessfull) {
                        i++;
                        success = false;
                        _failedDocuments = _failedDocuments.Union(insertResponse.FailedDocumentsList).ToList();

                        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled) {
                            LoggerManager.Instance.RecoveryLogger.Info("DatabaseRecoveryJob.Run()",
                                "ErrorCode: " + insertResponse.ErrorCode + "\t Error: " + insertResponse.Error +
                                " \tFailed Documents Count: " + _failedDocuments.Count);
                        }

                        // Build the retry batch from the documents whose keys
                        // were reported as failed.
                        List<IJSONDocument> retryList = new List<IJSONDocument>();
                        List<IJSONDocument> originalList = insertOperation.Documents.ToList();

                        foreach (FailedDocument failedDoc in insertResponse.FailedDocumentsList) {
                            foreach (IJSONDocument orgDoc in originalList) {
                                if (orgDoc.Key.Equals(failedDoc.DocumentKey)) {
                                    retryList.Add(orgDoc);
                                }
                            }
                        }

                        insertOperation.Documents = retryList;
                    } else {
                        success = true;
                    }
                } catch (Exception exp) {
                    if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled) {
                        LoggerManager.Instance.RecoveryLogger.Error("DatabaseRecoveryJob.Run() ",
                            slice.SliceHeader.Collection + " " + exp.ToString());
                    }

                    i++;
                    success = false;
                    // Extreme Hack: crude back-off before the next attempt.
                    Thread.Sleep(5000);
                }
            }
        } else if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled) {
            LoggerManager.Instance.RecoveryLogger.Error("DatabaseRecoveryJob.Restore()",
                "slice.Data is null " + slice.HeaderSize.ToString());
        }

        if (!collectionsPersisted.Contains(slice.SliceHeader.Collection)) {
            collectionsPersisted.Add(slice.SliceHeader.Collection);
            ExecutionStatus.Status = RecoveryStatus.Executing;
            // BUG FIX: the original expression (count / Collections.Count)
            // used integer division and truncated to 0 for every collection
            // but the last; scale to the 0-100 range before dividing, matching
            // the scale used elsewhere (PercentageExecution = 100 on completion).
            // [M_NOTE] rudimentary logic, change this
            ExecutionStatus.PercentageExecution = collectionsPersisted.Count * 100 / Collections.Count;
            ExecutionStatus.MessageTime = DateTime.Now;
            ExecutionStatus.Message = "Restoring " + Database + "_" + slice.SliceHeader.Collection;
        }
    } catch (Exception ex) {
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled) {
            LoggerManager.Instance.RecoveryLogger.Error("DatabaseRecoveryJob.Run() ",
                slice.SliceHeader.Collection + " " + ex.ToString());
        }
    }
}
// Serializes the configuration-server entities into a backup DataSlice,
// pushes it (plus a terminating "Complete" command slice) onto the shared
// persistence queue, then waits for the writer to drain the queue before
// reporting success or failure through ExecutionState.
internal override void Run()
{
    try {
        LoggerManager.Instance.SetThreadContext(new LoggerContext() {
            ShardName = ExecutionState.Shard != null ? ExecutionState.Shard : "",
            DatabaseName = ""
        });

        // Build the payload slice holding the serialized entities.
        DataSlice activeSlice = PersistenceManager.ActiveContext.GetBackupFile(RecoveryFolderStructure.CONFIG_SERVER).CreateNewDataSlice();
        activeSlice.SliceHeader.ContentType = _isDIFBackup ? DataSliceType.DifConfig : DataSliceType.Config;
        activeSlice.SliceHeader.Cluster = Cluster;

        if (_entities.Database.Count == 1) {
            activeSlice.SliceHeader.Database = _entities.Database.First().Key;
        }

        activeSlice.Data = CompactBinaryFormatter.ToByteBuffer(_entities, string.Empty);
        activeSlice.SliceHeader.TotalSize = activeSlice.Data.LongLength;
        PersistenceManager.SharedQueue.Add(activeSlice);

        // Sentinel slice telling the consumer the config backup is complete.
        DataSlice finalSlice = new DataSlice(999999);
        finalSlice.SliceHeader.Collection = "Complete";
        finalSlice.SliceHeader.Database = "ConfigServer";
        finalSlice.SliceHeader.Cluster = Cluster;
        finalSlice.SliceHeader.ContentType = DataSliceType.Command;
        finalSlice.Data = CompactBinaryFormatter.ToByteBuffer("Config_Complete_Adding", string.Empty);
        PersistenceManager.SharedQueue.Add(finalSlice);

        // Wait till all data has been consumed and written.
        // BUG FIX: the original loop had an empty body and spun at 100% CPU;
        // sleep between polls instead.
        // M_TODO: add a timeout interval for file writing; if the data is not
        // being consumed and the timeout span is reached, break the loop.
        while (!PersistenceManager.SharedQueue.Consumed) {
            Thread.Sleep(100);
        }

        // The else branch is reachable only once the M_TODO timeout above is
        // implemented; kept so the failure path is already wired up.
        if (PersistenceManager.SharedQueue.Consumed) {
            ExecutionState.Status = RecoveryStatus.Completed;
            ExecutionState.PercentageExecution = 100; //[M_NOTE] rudimentary logic, change this
            ExecutionState.MessageTime = DateTime.Now;
            ExecutionState.Message = "Completed Backup of ConfigServer";
        } else {
            ExecutionState.Status = RecoveryStatus.Failure;
            ExecutionState.PercentageExecution = 0; //[M_NOTE] rudimentary logic, change this
            ExecutionState.MessageTime = DateTime.Now;
            ExecutionState.Message = "Failed Backup of ConfigServer";
        }

        // Report progress off-thread so a slow submission cannot stall us.
        System.Threading.Tasks.Task.Factory.StartNew(() => ProgressHandler.SubmitRecoveryState(ExecutionState));

        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled) {
            LoggerManager.Instance.RecoveryLogger.Info("ConfigurationBackupJob.Run()", "completed");
        }
    } catch (ThreadAbortException) {
        // Swallow the abort so the worker thread can terminate cleanly.
        Thread.ResetAbort();
    } catch (Exception ex) {
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled) {
            LoggerManager.Instance.RecoveryLogger.Error("ConfigurationBackupJob.Run()", ex.ToString());
        }
    }
}