// A progress notification carries state (position, resume URI, HTTP verb)
// that must be remembered so a later retry can pick up where it left off.
private void OnProgress(object sender, AsyncOperationProgressEventArgs e)
{
    UserState userState = e.UserState as UserState;
    if (userState == null || userState.Row == null)
    {
        return;
    }

    string status = userState.RetryCounter > 1
        ? "Retrying (" + (userState.RetryCounter - 1).ToString() + "), uploading: " + e.ProgressPercentage + "% done"
        : "Uploading: " + e.ProgressPercentage + "% done";

    Trace.TraceInformation("OnProgress: " + status);
    Trace.Indent();
    Trace.TraceInformation("Verb: " + e.HttpVerb);
    Trace.TraceInformation("Uri: " + e.Uri);
    Trace.TraceInformation("Current position: " + e.Position);
    Trace.Unindent();

    // Remember where we are so a retry can resume from this exact point.
    userState.CurrentPosition = e.Position;
    userState.ResumeUri = e.Uri;
    userState.HttpVerb = e.HttpVerb;
    userState.Row.Cells[COLUMNINDEX_STATUS].Value = status;
}
// Forwards uploader progress to the UI handler and mirrors the percentage
// into the current Sitecore job status so background monitors can see it.
void uploader_AsyncOperationProgress(object sender, AsyncOperationProgressEventArgs e)
{
    UploadProgress(sender, e);

    var job = Sitecore.Context.Job;
    if (job == null)
    {
        return;
    }

    job.Status.Processed = e.ProgressPercentage;
    job.Status.Messages.Add(e.ProgressPercentage.ToString());
}
// Invoked via the AsyncOperation object, so it is guaranteed to execute
// on the thread the async operation was created on.
private void OnAsyncReportProgress(object state)
{
    var args = state as AsyncOperationProgressEventArgs;
    // Copy the delegate to a local so an unsubscribe between the null
    // check and the invocation cannot cause a NullReferenceException.
    var handler = this.AsyncOperationProgress;
    if (handler != null)
    {
        handler(this, args);
    }
}
// Reveals the cancel button and advances the progress bar, but only for
// operations whose UserState is still in our tracked-states collection.
private void OnProgress(object sender, AsyncOperationProgressEventArgs e)
{
    this.CancelAsync.Enabled = true;
    this.CancelAsync.Visible = true;

    UserState userState = e.UserState as UserState;
    if (this.states.Contains(userState))
    {
        this.progressBar.Value = e.ProgressPercentage;
    }
}
// Posts a progress notification unless the operation has already been
// cancelled. Returns false when no notification was sent (cancellation
// means the completed event is about to be raised instead).
internal bool SendProgressData(AsyncData data, AsyncOperationProgressEventArgs args)
{
    if (CheckIfOperationIsCancelled(data.UserData))
    {
        return false;
    }

    data.Operation.Post(data.Delegate, args);
    return true;
}
// Buffers the entire response stream into a MemoryStream (the heavy work
// has to happen on the worker thread), posting progress after each read.
// Throws ArgumentException if the operation is cancelled mid-copy.
private MemoryStream CopyResponseToMemory(AsyncData data, Stream responseStream, long contentLength)
{
    if (responseStream == null)
    {
        return null;
    }

    const int bufferSize = 4096;
    var buffer = new byte[bufferSize];
    var buffered = new MemoryStream();
    long totalWritten = 0;
    double percent = 0;

    int read;
    while ((read = responseStream.Read(buffer, 0, bufferSize)) > 0)
    {
        buffered.Write(buffer, 0, read);

        // No progress delegate registered -> just keep copying.
        if (data == null || data.Delegate == null)
        {
            continue;
        }

        totalWritten += read;
        if (contentLength > bufferSize)
        {
            percent = totalWritten * 100d / contentLength;
        }

        // Another thread may have cancelled this task; bail out if so.
        if (CheckIfOperationIsCancelled(data.UserData))
        {
            throw new ArgumentException("Operation was cancelled");
        }

        var args = new AsyncOperationProgressEventArgs(contentLength, totalWritten, (int)percent,
                                                       data.UriToUse, data.HttpVerb, data.UserData);
        data.Operation.Post(data.Delegate, args);
    }

    buffered.Seek(0, SeekOrigin.Begin);
    return buffered;
}
//////////////////////////////////////////////////////////////////////
/// <summary>Takes our copy of the stream and writes one chunk of it into the
/// request stream, posting progress notifications along the way.
/// Returns TRUE when fewer than a full chunk's worth of bytes remained,
/// i.e. this was the final chunk of the input stream.</summary>
//////////////////////////////////////////////////////////////////////
protected bool CopyData(Stream input, HttpWebRequest request, int partIndex, AsyncData data, Uri requestId)
{
    long chunkCounter = 0;
    long chunkStart = lastChunks[requestId];
    long chunkSizeMb = this.chunkSize * ResumableUploader.MB;
    long dataLength = GetStreamLength(input);

    // move the source stream to the start of this chunk
    input.Seek(chunkStart, SeekOrigin.Begin);

    // to reduce memory consumption, we read in 256K chunks
    const int size = 262144;
    byte[] bytes = new byte[size];
    int numBytes;

    // First pass: calculate the content length. We can not modify headers
    // AFTER we started writing to the stream. We want to send chunksize*MB,
    // but it might be less for small files or for the last chunk.
    while ((numBytes = input.Read(bytes, 0, size)) > 0)
    {
        chunkCounter += numBytes;
        if (chunkCounter >= chunkSizeMb)
        {
            break;
        }
    }

    request.ContentLength = chunkCounter;
    long chunkEnd = chunkStart + chunkCounter;

    // Set the Content-Range header; "*" when the total length is unknown.
    // Headers.Set (not Add) so an already-present Content-Range value is
    // replaced rather than duplicated, matching the other variant of this
    // method in the file.
    string contentRange = String.Format("bytes {0}-{1}/{2}", chunkStart, chunkEnd - 1,
                                        dataLength > 0 ? dataLength.ToString() : "*");
    request.Headers.Set(HttpRequestHeader.ContentRange, contentRange);

    lastChunks[requestId] = chunkEnd; // save the last start index, need to add 503 error handling to this

    // Second pass: stream the chunk into the real request stream.
    using (Stream req = request.GetRequestStream())
    {
        // move the source stream back to the start of this chunk
        input.Seek(chunkStart, SeekOrigin.Begin);
        chunkCounter = 0;

        while ((numBytes = input.Read(bytes, 0, size)) > 0)
        {
            req.Write(bytes, 0, numBytes);
            chunkCounter += numBytes;

            // while we are writing along, send notifications out
            if (data != null)
            {
                if (CheckIfOperationIsCancelled(data.UserData))
                {
                    break;
                }
                else if (data.Delegate != null && data.DataHandler != null)
                {
                    long position = chunkStart + chunkCounter - 1;
                    // guard against division by zero when the total stream
                    // length is unknown (dataLength <= 0, the "*" range case)
                    int percentage = dataLength > 0 ? (int)((double)position / dataLength * 100) : 0;
                    var args = new AsyncOperationProgressEventArgs(dataLength, position, percentage,
                                                                   request.RequestUri, request.Method,
                                                                   data.UserData);
                    data.DataHandler.SendProgressData(data, args);
                }
            }

            if (chunkCounter >= request.ContentLength)
            {
                break;
            }
        }
    }

    return chunkCounter < chunkSizeMb;
}
// Mirrors spreadsheet upload progress into the shared instance data.
private static void OnSpreadsheetUploadProgress(object in_sender, AsyncOperationProgressEventArgs in_e)
{
    int percentDone = in_e.ProgressPercentage;
    Instance.InstanceData.WorkbookUploadProgress = percentDone;
}
/// <summary>
/// Takes our copy of the stream and writes one chunk of it into the
/// request stream, posting progress notifications along the way.
/// Returns TRUE when fewer than a full chunk's worth of bytes remained,
/// i.e. this was the final chunk of the input stream.
/// </summary>
protected bool CopyData(Stream input, HttpWebRequest request, int partIndex, AsyncData data, Uri requestId)
{
    long chunkCounter = 0;
    long chunkStart = lastChunks[requestId];
    long chunkSizeMb = this.chunkSize * ResumableUploader.MB;
    long dataLength = GetStreamLength(input);

    // move the source stream to the start of this chunk
    input.Seek(chunkStart, SeekOrigin.Begin);

    // to reduce memory consumption, we read in 256K chunks
    const int size = 262144;
    byte[] bytes = new byte[size];
    int numBytes;

    // First pass: calculate the content length. We can not modify headers
    // AFTER we started writing to the stream. We want to send chunksize*MB,
    // but it might be less for small files or for the last chunk.
    while ((numBytes = input.Read(bytes, 0, size)) > 0)
    {
        chunkCounter += numBytes;
        if (chunkCounter >= chunkSizeMb)
        {
            break;
        }
    }

    request.ContentLength = chunkCounter;
    long chunkEnd = chunkStart + chunkCounter;

    // Set the Content-Range header; "*" when the total length is unknown.
    string contentRange = String.Format("bytes {0}-{1}/{2}", chunkStart, chunkEnd - 1,
                                        dataLength > 0 ? dataLength.ToString() : "*");
    request.Headers.Set(HttpRequestHeader.ContentRange, contentRange);

    lastChunks[requestId] = chunkEnd; // save the last start index, need to add 503 error handling to this

    // Second pass: stream the chunk into the real request stream.
    using (Stream req = request.GetRequestStream())
    {
        // move the source stream back to the start of this chunk
        input.Seek(chunkStart, SeekOrigin.Begin);
        chunkCounter = 0;

        while ((numBytes = input.Read(bytes, 0, size)) > 0)
        {
            req.Write(bytes, 0, numBytes);
            chunkCounter += numBytes;

            // while we are writing along, send notifications out
            if (data != null)
            {
                if (CheckIfOperationIsCancelled(data.UserData))
                {
                    break;
                }
                else if (data.Delegate != null && data.DataHandler != null)
                {
                    long position = chunkStart + chunkCounter - 1;
                    // guard against division by zero when the total stream
                    // length is unknown (dataLength <= 0, the "*" range case)
                    int percentage = dataLength > 0 ? (int)((double)position / dataLength * 100) : 0;
                    var args = new AsyncOperationProgressEventArgs(dataLength, position, percentage,
                                                                   request.RequestUri, request.Method,
                                                                   data.UserData);
                    data.DataHandler.SendProgressData(data, args);
                }
            }

            if (chunkCounter >= request.ContentLength)
            {
                break;
            }
        }
    }

    return chunkCounter < chunkSizeMb;
}
// Debug progress handler: writes the current percentage to the console.
private void OnProgress(object sender, AsyncOperationProgressEventArgs e)
{
    Console.WriteLine("Nailed " + e.ProgressPercentage + "%");
}
// Sends a progress notification to the async operation unless the user
// has already cancelled it. Returns true when the notification was posted.
internal bool SendProgressData(AsyncData data, AsyncOperationProgressEventArgs args)
{
    bool stillRunning = !CheckIfOperationIsCancelled(data.UserData);
    if (stillRunning)
    {
        // Still active: deliver the progress args on the creator's thread.
        data.Operation.Post(data.Delegate, args);
    }
    return stillRunning;
}
// Copies the web response into an in-memory stream so that all of the
// heavy I/O happens on the worker thread, raising a progress event per
// buffer read. Throws ArgumentException on mid-copy cancellation.
private MemoryStream CopyResponseToMemory(AsyncData data, Stream responseStream, long contentLength)
{
    if (responseStream == null)
    {
        return null;
    }

    const int chunk = 4096;
    byte[] scratch = new byte[chunk];
    var target = new MemoryStream();
    long copied = 0;
    double progress = 0;

    int count;
    while ((count = responseStream.Read(scratch, 0, chunk)) > 0)
    {
        target.Write(scratch, 0, count);

        bool reportProgress = data != null && data.Delegate != null;
        if (!reportProgress)
        {
            continue;
        }

        copied += count;
        if (contentLength > chunk)
        {
            progress = copied * 100d / contentLength;
        }

        // Abort the copy if the task was cancelled from another thread.
        if (CheckIfOperationIsCancelled(data.UserData))
        {
            throw new ArgumentException("Operation was cancelled");
        }

        data.Operation.Post(data.Delegate,
                            new AsyncOperationProgressEventArgs(contentLength, copied, (int)progress,
                                                                data.UriToUse, data.HttpVerb, data.UserData));
    }

    target.Seek(0, SeekOrigin.Begin);
    return target;
}
// Progress callback stub: deliberately ignores the notification.
// The original body only copied e.ProgressPercentage into an unused
// local (a dead store), so the handler effectively did nothing.
static void OnProgress(object sender, AsyncOperationProgressEventArgs e)
{
}
// Relays resumable-upload progress to the dispatcher tracking this upload.
private void resumableUploader_AsyncOperationProgress(object sender, AsyncOperationProgressEventArgs progress)
{
    UploadingDispatcher.SetProgress(uploadId, progress.ProgressPercentage);
}
/////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
/// <summary>Takes our buffered copy of the request body and writes it into
/// the real request stream, raising progress notifications as bytes go out.
/// Stops early if a progress handler reports the operation was cancelled.</summary>
//////////////////////////////////////////////////////////////////////
protected void CopyRequestData()
{
    if (this.requestCopy == null)
    {
        return;
    }

    // Since we don't use write buffering on the WebRequest object,
    // we need to ensure the Content-Length field is correctly set
    // before we start writing to the stream.
    EnsureWebRequest();
    this.Request.ContentLength = this.requestCopy.Length;

    // stream the buffered copy into the real request stream;
    // using-statement replaces the original try/finally { req.Close(); }
    using (Stream req = base.GetRequestStream())
    {
        const int size = 4096;
        byte[] bytes = new byte[size];
        int numBytes;

        // Progress step per full buffer read; 100 when everything fits
        // into a single read.
        double oneLoop = 100;
        if (this.requestCopy.Length > size)
        {
            oneLoop = 100 / ((double)this.requestCopy.Length / size);
        }

        this.requestCopy.Seek(0, SeekOrigin.Begin);

#if WindowsCE || PocketPC
#else
        long bytesWritten = 0;
        double current = 0;
#endif
        while ((numBytes = this.requestCopy.Read(bytes, 0, size)) > 0)
        {
            req.Write(bytes, 0, numBytes);
#if WindowsCE || PocketPC
#else
            bytesWritten += numBytes;
            if (this.asyncData != null &&
                this.asyncData.Delegate != null &&
                this.asyncData.DataHandler != null)
            {
                // Note: 'current' is advanced AFTER the args are built, so
                // the first notification reports 0% (original behavior kept).
                var args = new AsyncOperationProgressEventArgs(this.requestCopy.Length, bytesWritten,
                                                               (int)current, this.Request.RequestUri,
                                                               this.Request.Method, this.asyncData.UserData);
                current += oneLoop;
                if (this.asyncData.DataHandler.SendProgressData(asyncData, args) == false)
                {
                    break;
                }
            }
#endif
        }
    }
}