/// <summary>
/// Thread-safely records the current upload state.
/// </summary>
/// <param name="state">The new state value.</param>
public void SetState(UploadState state)
{
    // NOTE(review): lock (this) is discouraged — external code can lock the
    // same instance and deadlock; a private lock object would be safer.
    lock (this)
    {
        this.state = state;
    }
}
/// <summary>
/// Translates an upload state into the message text shown to the user.
/// </summary>
/// <param name="state">The upload outcome to describe.</param>
/// <returns>The corresponding message (localized for non-success states).</returns>
public string GetStateMessage(UploadState state)
{
    if (state == UploadState.Success)
        return "SUCCESS";
    if (state == UploadState.FileAccessError)
        return "权限错误";
    if (state == UploadState.SizeLimitExceed)
        return "文件大小超出服务器限制";
    if (state == UploadState.TypeNotAllow)
        return "不允许的文件格式";
    if (state == UploadState.NetworkError)
        return "网络错误";

    // Any unrecognized state falls through to a generic "unknown error".
    return "未知错误";
}
/// <summary>
/// Loads an archived original photo by id and populates the form's text fields
/// and thumbnail preview from it.
/// </summary>
/// <param name="photoId">Key looked up in Table_OriginalPhotosArchives.</param>
public void PopulateByOriginalPhotoId(int photoId)
{
    try
    {
        // Single() throws if the id is missing or duplicated; the catch below
        // turns any such failure into a silent no-op.
        var originalPhoto = (from c in Lookup.Db.Table_OriginalPhotosArchives
                             where c.PhotoId == photoId
                             select c).Single();

        _uploadState = UploadState.SavedOnlineOk;

        textBoxPhotoCaption.Text = originalPhoto.Caption;
        textBoxPhotoDescription.Text = originalPhoto.Description;
        textBoxPhotoHeight.Text = originalPhoto.Height.ToString();
        textBoxPhotoWidth.Text = originalPhoto.Width.ToString();
        // Hebrew "from archive" — indicates the photo came from the database
        // rather than a local file path.
        textBoxPhotoPath.Text = "מארכיון";

        Image img = Conversions.ByteArrayToImage(originalPhoto.ImageData.ToArray());
        // TODO: need to confirm retrieving well...
        pictureBoxPreview.Image = img.GetThumbnailImage(Sizes[2, 0], Sizes[2, 1], ThumbnailCallback, IntPtr.Zero);
    }
    catch
    {
        // NOTE(review): empty catch swallows every failure (bad id, DB error,
        // image decode) with no logging — consider at least logging here.
    }
}
/// <summary>
/// Maps an upload state to its user-facing message text.
/// </summary>
/// <param name="state">The upload outcome to describe.</param>
/// <returns>The matching message; a generic "unknown error" otherwise.</returns>
private string GetStateMessage(UploadState state)
{
    switch (state)
    {
        case UploadState.Success:
            return "SUCCESS";
        case UploadState.FileAccessError:
            return "文件访问出错,请检查写入权限";
        case UploadState.SizeLimitExceed:
            return "文件大小超出服务器限制";
        case UploadState.TypeNotAllow:
            return "不允许的文件格式";
        case UploadState.NetworkError:
            return "网络错误";
        default:
            return "未知错误";
    }
}
/// <summary>
/// Start asset transfer from the client
/// </summary>
/// <param name="remoteClient"></param>
/// <param name="assetID"></param>
/// <param name="transaction"></param>
/// <param name="type"></param>
/// <param name="data">
/// Optional data. If present then the asset is created immediately with this data
/// rather than requesting an upload from the client. The data must be longer than 2 bytes.
/// </param>
/// <param name="storeLocal">NOTE(review): accepted but not used in this variant — confirm intent.</param>
/// <param name="tempFile"></param>
public void StartUpload(
    IClientAPI remoteClient, UUID assetID, UUID transaction, sbyte type, byte[] data,
    bool storeLocal, bool tempFile)
{
    // MainConsole.Instance.DebugFormat("[ASSET XFER UPLOADER]: Initialised xfer from {0}, asset {1}, transaction {2}, type {3}, storeLocal {4}, tempFile {5}, already received data length {6}", remoteClient.Name, assetID, transaction, type, storeLocal, tempFile, data.Length);

    // Only a fresh uploader (state New) may start; anything else indicates a
    // duplicate start for this transaction, which is refused.
    lock (this)
    {
        if (m_uploadState != UploadState.New)
        {
            MainConsole.Instance.WarnFormat(
                "[ASSET XFER UPLOADER]: Tried to start upload of asset {0}, transaction {1} for {2} but this is already in state {3}. Aborting.",
                assetID, transaction, remoteClient.Name, m_uploadState);
            return;
        }

        m_uploadState = UploadState.Uploading;
    }

    ourClient = remoteClient;

    m_asset.ID = assetID;
    m_asset.Type = type;
    m_asset.CreatorID = remoteClient.AgentId;
    m_asset.Data = data;

    if (tempFile)
    {
        m_asset.Flags |= AssetFlags.Temporary;
    }

    // Payload supplied inline (> 2 bytes): finish immediately; otherwise ask
    // the client to begin an Xfer of the asset data.
    if (m_asset.Data.Length > 2)
    {
        SendCompleteMessage();
    }
    else
    {
        RequestStartXfer();
    }
}
/// <summary>
/// Marks the upload complete, notifies the client, and finishes any pending
/// inventory create/update or task-item update that was waiting on the data.
/// </summary>
protected void SendCompleteMessage()
{
    // We must lock in order to avoid a race with a separate thread dealing with an inventory item or create
    // message from other client UDP.
    lock (this)
    {
        m_uploadState = UploadState.Complete;

        ourClient.SendAssetUploadCompleteMessage((sbyte)m_asset.Type, true, m_asset.ID);

        // Exactly one of these follow-ups may be pending; flags are mutually
        // exclusive by this if/else chain.
        if (m_createItem)
        {
            CompleteCreateItem(m_createItemCallback);
        }
        else if (m_updateItem)
        {
            CompleteItemUpdate(m_updateItemData);
        }
        else if (m_updateTaskItem)
        {
            CompleteTaskItemUpdate(m_updateTaskItemData);
        }
    }

    MainConsole.Instance.DebugFormat(
        "[ASSET XFER UPLOADER]: Uploaded asset {0} for transaction {1}",
        m_asset.ID, m_transactionID);

    // Optional debugging aid: dump the raw asset bytes to a timestamped file.
    if (m_dumpAssetToFile)
    {
        DateTime now = DateTime.Now;
        string filename =
            String.Format("{6}_{7}_{0:d2}{1:d2}{2:d2}_{3:d2}{4:d2}{5:d2}.dat",
                now.Year, now.Month, now.Day, now.Hour, now.Minute,
                now.Second, m_asset.Name, m_asset.Type);
        SaveAssetToFile(filename, m_asset.Data);
    }
}
/// <summary>
/// Uploads a file and reduces the detailed upload state to a numeric result:
/// 1 = uploaded, 2 = uploaded under a different name, 0 = anything else
/// (not uploaded / disallowed extension).
/// </summary>
public static int UploadFilesToServer(FileUpload fileUpload, string serverLocation, bool strict)
{
    UploadState uploadState = SaveFile(fileUpload, serverLocation, strict);

    if (uploadState == UploadState.UPLOADED)
    {
        return 1;
    }
    if (uploadState == UploadState.UPLOADEDWITHOTHERNAME)
    {
        return 2;
    }
    // NOTUPLOADED and every other state map to 0, as in the original switch.
    return 0;
}
/// <summary>
/// Creates an UploadStatus carrying the given upload state.
/// </summary>
/// <param name="state">The state this status reports.</param>
public UploadStatus(UploadState state)
{
    State = state;
}
/// <summary>
/// Resumable S3 multipart upload driver. Usage: "bucket filename" starts a new
/// upload; invoked with no arguments it resumes from the serialized progress in
/// upload.dat. Progress is checkpointed to upload.dat after every part.
/// </summary>
public static void Main(string[] args)
{
    XmlSerializerFactory factory = new XmlSerializerFactory();
    XmlSerializer serializer = factory.CreateSerializer(typeof(UploadState));
    UploadState us;
    if (args.Length == 2)
    {
        // Fresh upload: args[0] is the bucket, args[1] the file to upload.
        us = new UploadState(args[1], args[0]);
    }
    else if (args.Length == 0 && new FileInfo("upload.dat").Exists)
    {
        // Resume: rehydrate the previous run's progress.
        using (FileStream fs = new FileStream("upload.dat", FileMode.Open))
        {
            us = (UploadState)serializer.Deserialize(fs);
        }
    }
    else
    {
        Console.Error.WriteLine("Usage: bucket filename");
        return;
    }
    try
    {
        NameValueCollection appConfig = ConfigurationManager.AppSettings;

        // Print the number of Amazon S3 Buckets.
        AmazonS3 s3Client = AWSClientFactory.CreateAmazonS3Client(
            appConfig["AWSAccessKey"],
            appConfig["AWSSecretKey"]
            );

        if (string.IsNullOrEmpty(us.UploadId))
        {
            // First run: initiate the multipart upload and persist its UploadId
            // so a later resume can continue the same upload.
            InitiateMultipartUploadRequest req = new InitiateMultipartUploadRequest()
                .WithBucketName(us.BucketName)
                .WithKey(us.Key)
                ;
            us.UploadId = s3Client.InitiateMultipartUpload(req).UploadId;
            // NOTE(review): OpenOrCreate does not truncate — if the new XML is
            // ever shorter than the file's previous contents, stale bytes
            // remain at the end. FileMode.Create would be safer; verify.
            using (FileStream fs = new FileStream("upload.dat", FileMode.OpenOrCreate))
            {
                serializer.Serialize(fs, us);
            }
        }
        while (us.FilePosition < us.FileLength)
        {
            try
            {
                Console.WriteLine("Uploading part {0} of {1}", us.PartNumber, us.NumChunks);
                UploadPartRequest ureq = new UploadPartRequest()
                    .WithBucketName(us.BucketName)
                    .WithFilePath(us.FileName)
                    .WithFilePosition(us.FilePosition)
                    .WithPartNumber(us.PartNumber)
                    // The final part may be smaller than ChunkSize.
                    .WithPartSize(us.FileLength - us.FilePosition > us.ChunkSize ? us.ChunkSize : us.FileLength - us.FilePosition)
                    .WithGenerateChecksum(true)
                    .WithKey(us.Key)
                    .WithUploadId(us.UploadId)
                    .WithSubscriber(new EventHandler<UploadPartProgressArgs>(ShowProgress))
                    ;
                // Overwrite the ETag slot when re-running a part, otherwise append.
                if (us.Responses.Count > us.PartNumber - 1)
                {
                    us.Responses[us.PartNumber - 1] = new PartETag(us.PartNumber, s3Client.UploadPart(ureq).ETag);
                }
                else
                {
                    us.Responses.Insert(us.PartNumber - 1, new PartETag(us.PartNumber, s3Client.UploadPart(ureq).ETag));
                }
                us.PartNumber++;
                us.FilePosition += us.ChunkSize;
                // Checkpoint after every part so a crash/Ctrl-C can resume here.
                using (FileStream fs = new FileStream("upload.dat", FileMode.OpenOrCreate))
                {
                    serializer.Serialize(fs, us);
                }
            }
            catch (System.Net.WebException)
            {
                // NOTE(review): transient network failures are swallowed and
                // the same part is retried on the next loop iteration — with
                // no backoff or retry cap, a persistent outage loops forever.
            }
        }
        // All parts sent: stitch them together server-side.
        CompleteMultipartUploadRequest creq = new CompleteMultipartUploadRequest()
            .WithPartETags(us.Responses)
            .WithBucketName(us.BucketName)
            .WithUploadId(us.UploadId)
            .WithKey(us.Key)
            ;
        CompleteMultipartUploadResponse cresp = s3Client.CompleteMultipartUpload(creq);
        System.Console.WriteLine("File available at {0}", cresp.Location);
        // Success: the resume checkpoint is no longer needed.
        File.Delete("upload.dat");
    }
    catch (AmazonS3Exception e)
    {
        Console.Error.WriteLine(e);
    }
    //Console.Write(GetServiceOutput());
    //Console.Read();
}
/// <summary>
/// Returns true when the current upload state matches <paramref name="theState"/>.
/// </summary>
public bool IsStateEqual(UploadState theState)
{
    return (theState == _uploadState);
}
/// <summary>
/// Start asset transfer from the client
/// </summary>
/// <param name="remoteClient"></param>
/// <param name="assetID"></param>
/// <param name="transaction"></param>
/// <param name="type"></param>
/// <param name="data">
/// Optional data. If present then the asset is created immediately with this data
/// rather than requesting an upload from the client. The data must be longer than 2 bytes.
/// </param>
/// <param name="storeLocal"></param>
/// <param name="tempFile"></param>
public void StartUpload(
    IClientAPI remoteClient, UUID assetID, UUID transaction, sbyte type, byte[] data,
    bool storeLocal, bool tempFile)
{
    // m_log.DebugFormat(
    //     "[ASSET XFER UPLOADER]: Initialised xfer from {0}, asset {1}, transaction {2}, type {3}, storeLocal {4}, tempFile {5}, already received data length {6}",
    //     remoteClient.Name, assetID, transaction, type, storeLocal, tempFile, data.Length);

    // Only a fresh uploader (state New) may start; anything else indicates a
    // duplicate start for this transaction, which is refused.
    lock (this)
    {
        if (m_uploadState != UploadState.New)
        {
            m_log.WarnFormat(
                "[ASSET XFER UPLOADER]: Tried to start upload of asset {0}, transaction {1} for {2} but this is already in state {3}. Aborting.",
                assetID, transaction, remoteClient.Name, m_uploadState);
            return;
        }

        m_uploadState = UploadState.Uploading;
    }

    ourClient = remoteClient;

    m_asset.FullID = assetID;
    m_asset.Type = type;
    m_asset.CreatorID = remoteClient.AgentId.ToString();
    m_asset.Data = data;
    m_asset.Local = storeLocal;
    m_asset.Temporary = tempFile;

    // m_storeLocal = storeLocal;

    // Payload supplied inline (> 2 bytes): finish immediately; otherwise ask
    // the client to begin an Xfer of the asset data.
    if (m_asset.Data.Length > 2)
    {
        SendCompleteMessage();
    }
    else
    {
        RequestStartXfer();
    }
}
/// <summary>
/// Raises the UploadStateChanged event with the given state.
/// </summary>
/// <param name="state">The state to report to subscribers.</param>
private void OnUploadStateChanged(UploadState state)
{
    // Copy the delegate to a local before invoking: the original pattern
    // (check the field, then invoke the field) races with a subscriber
    // unsubscribing on another thread between the two reads, which would
    // throw NullReferenceException. Reading once eliminates that window.
    var handler = UploadStateChanged;
    if (handler != null)
        handler(state);
}
/// <summary>
/// Records the termination reason and error message, then applies the state.
/// </summary>
internal void SetState(UploadState state, UploadTerminationReason reason, string errorMessage)
{
    // Store the details first so the state change sees them already in place.
    _reason = reason;
    _errorMessage = errorMessage;
    SetState(state);
}
/// <summary>
/// Applies the new state and refreshes the derived status.
/// </summary>
internal void SetState(UploadState state)
{
    _state = state;
    // Recompute/propagate status now that the state has changed.
    UpdateStatus();
}
/// <summary>
/// Records the new upload state and shows its message on the result label.
/// </summary>
private void SetPhotoFormState(UploadState theState)
{
    _uploadState = theState;
    // NOTE(review): assumes PhotoFormMsgs has an entry for every UploadState
    // value — an unmapped value would throw at the index. Verify.
    labelResultMsg.Text = PhotoFormMsgs[(int)theState];
}
/// <summary>
/// SCP transfer-end callback: records that the ISO upload finished.
/// </summary>
private void sshCp_OnTransferEnd(string src, string dst, long transferredBytes, long totalBytes, string message)
{
    // Only the completion matters here; the transfer details are unused.
    isoUploadState = UploadState.Completed;
}
/// <summary>
/// Parses the incoming multipart/form-data request body. File parts are
/// streamed to upload storage; ordinary fields are copied into an in-memory
/// replacement entity body handed to the rest of the pipeline. Throws
/// UploadTooLargeException / HttpException on oversize or truncated uploads.
/// </summary>
private void ParseOrThrow()
{
    origPreloadedBody = OrigWorker.GetPreloadedEntityBody();
    string contentTypeHeader = OrigWorker.GetKnownRequestHeader(HttpWorkerRequest.HeaderContentType);
    string contentLengthHeader = OrigWorker.GetKnownRequestHeader(HttpWorkerRequest.HeaderContentLength);
    string transferEncodingHeader = OrigWorker.GetKnownRequestHeader(HttpWorkerRequest.HeaderTransferEncoding);
    if (contentLengthHeader != null)
    {
        origContentLength = Int64.Parse(contentLengthHeader);
    }
    // Debug mode: mirror the raw entity body and chunk sizes to disk.
    if (Config.Current.DebugDirectory != null)
    {
        string logEntityBodyBaseName = Path.Combine(Config.Current.DebugDirectory.FullName, DateTime.Now.Ticks.ToString());
        LogEntityBodyStream = File.Create(logEntityBodyBaseName + ".body");
        LogEntityBodySizesStream = File.CreateText(logEntityBodyBaseName + ".sizes");
        LogEntityBodySizesStream.WriteLine(contentTypeHeader);
        LogEntityBodySizesStream.WriteLine(contentLengthHeader);
        if (origPreloadedBody != null)
        {
            LogEntityBodyStream.Write(origPreloadedBody, 0, origPreloadedBody.Length);
            LogEntityBodySizesStream.WriteLine(origPreloadedBody.Length);
        }
        else
        {
            LogEntityBodySizesStream.WriteLine(0);
        }
    }
    FieldNameTranslator translator = new FieldNameTranslator();
    if (MultiRequestControlID == null && UploadState != null)
    {
        UploadState.BytesTotal += origContentLength;
    }
    if (log.IsDebugEnabled) log.Debug("=" + contentLengthHeader + " -> " + origContentLength);
    // The MIME part separator comes from the Content-Type "boundary" attribute.
    boundary = System.Text.Encoding.ASCII.GetBytes("--" + GetAttribute(contentTypeHeader, "boundary"));
    if (log.IsDebugEnabled) log.Debug("boundary=" + System.Text.Encoding.ASCII.GetString(boundary));
    string charset = GetAttribute(contentTypeHeader, "charset");
    if (charset != null)
    {
        try
        {
            System.Text.Encoding encoding = System.Text.Encoding.GetEncoding(charset);
            ContentEncoding = encoding;
        }
        catch (NotSupportedException)
        {
            if (log.IsDebugEnabled) log.Debug("Ignoring unsupported charset " + charset + ". Using utf-8.");
        }
    }
    else
    {
        ContentEncoding = HttpContext.Current.Response.ContentEncoding;
    }
    preloadedEntityBodyStream = new MemoryStream();
    Hashtable storageConfigStreamTable = new Hashtable();
    Stream postBackIDStream = null;
    outputStream = preloadedEntityBodyStream;
    readPos = writePos = parsePos = 0;
    // Main loop: one iteration per MIME part, until the terminal boundary.
    while (CopyUntilBoundary())
    {
        // If we were writing to a file, close it
        if (outputStream == fileStream && outputStream != null)
        {
            outputStream.Close();
        }
        // If we were receiving the value generated by the HiddenPostBackID control, set the postback ID.
        if (postBackIDStream != null)
        {
            postBackIDStream.Seek(0, System.IO.SeekOrigin.Begin);
            StreamReader sr = new System.IO.StreamReader(postBackIDStream);
            translator.PostBackID = sr.ReadToEnd();
            postBackIDStream = null;
        }
        // parse the headers
        string name = null, fileName = null, contentType = null;
        // On the first part the boundary lacks the leading CRLF; prepend it so
        // subsequent boundary matches include the line break.
        if (boundary[0] != (byte)'\r')
        {
            byte[] newBoundary = new byte[boundary.Length + 2];
            Buffer.BlockCopy(boundary, 0, newBoundary, 2, boundary.Length);
            newBoundary[0] = (byte)'\r';
            newBoundary[1] = (byte)'\n';
            boundary = newBoundary;
        }
        else
        {
            GetLine(); // Blank line
        }
        GetLine(); // boundary line
        string header;
        while (null != (header = GetLine()))
        {
            if (log.IsDebugEnabled) log.Debug("header=" + header);
            int colonPos = header.IndexOf(':');
            if (colonPos < 0)
            {
                break;
            }
            string headerName = header.Substring(0, colonPos);
            if (String.Compare(headerName, "Content-Disposition", true) == 0)
            {
                name = GetAttribute(header, "name");
                fileName = GetAttribute(header, "filename");
            }
            else if (String.Compare(headerName, "Content-Type", true) == 0)
            {
                contentType = header.Substring(colonPos + 1).Trim();
            }
        }
        if (log.IsDebugEnabled) log.Debug("name = " + name);
        if (log.IsDebugEnabled) log.Debug("fileName = " + fileName);
        // NOTE(review): the next two lines duplicate the two above (and misuse
        // DebugFormat with a concatenated string) — likely leftover debugging.
        if (log.IsDebugEnabled) log.DebugFormat("name = " + name);
        if (log.IsDebugEnabled) log.DebugFormat("fileName = " + fileName);
        string controlID = null;
        if (name == Config.Current.PostBackIDQueryParam && postBackIDStream == null)
        {
            // This part carries the postback ID; buffer it for the next iteration.
            postBackIDStream = outputStream = new System.IO.MemoryStream();
            readPos = parsePos; // Skip past the boundary and headers
        }
        else if (name != null && null != (controlID = translator.ConfigFieldNameToControlID(name)))
        {
            // Storage-config field: buffer it, keyed by control ID, for use when
            // the matching file part arrives.
            storageConfigStreamTable[controlID] = outputStream = new System.IO.MemoryStream();
            readPos = parsePos; // Skip past the boundary and headers
        }
        else if (name != null && null != (controlID = translator.FileFieldNameToControlID(name)))
        {
            if (log.IsDebugEnabled) log.DebugFormat("name != null && controlID != null");
            if (UploadState == null)
            {
                // First file field of the request: open/create the shared
                // progress-tracking state for this postback.
                UploadState = UploadStateStore.OpenReadWriteOrCreate(translator.FileFieldNameToPostBackID(name));
                if (transferEncodingHeader != "chunked")
                    UploadState.Status = UploadStatus.NormalInProgress;
                else
                    UploadState.Status = UploadStatus.ChunkedInProgress;
                UploadState.BytesRead += grandTotalBytesRead;
                UploadState.BytesTotal += origContentLength;
            }
            UploadStorageConfig storageConfig = null;
            if (UploadState.MultiRequestObject != null)
            {
                string secureStorageConfigString = UploadState.MultiRequestObject as string;
                if (secureStorageConfigString != null)
                {
                    storageConfig = UploadStorage.CreateUploadStorageConfig();
                    storageConfig.Unprotect(secureStorageConfigString);
                    if (log.IsDebugEnabled) log.DebugFormat("storageConfig[tempDirectory]={0}", storageConfig["tempDirectory"]);
                }
            }
            string configID = translator.FileIDToConfigID(controlID);
            MemoryStream storageConfigStream = storageConfigStreamTable[configID] as MemoryStream;
            if (storageConfigStream != null)
            {
                storageConfigStream.Seek(0, System.IO.SeekOrigin.Begin);
                StreamReader sr = new System.IO.StreamReader(storageConfigStream);
                string secureStorageConfigString = sr.ReadToEnd();
                if (log.IsDebugEnabled)
                {
                    log.Debug("storageConfigStream = " + secureStorageConfigString);
                }
                storageConfig = UploadStorage.CreateUploadStorageConfig();
                storageConfig.Unprotect(secureStorageConfigString);
                // Write out a part for the config hidden field
                if (log.IsDebugEnabled) log.DebugFormat("Calling WriteReplacementFormField({0}, {1})", configID, secureStorageConfigString);
                WriteReplacementFormField(configID, secureStorageConfigString);
                // Remove the stream from the table, so we don't write the replacement field again.
                storageConfigStreamTable.Remove(configID);
            }
            if (fileName != null)
            {
                if (log.IsDebugEnabled) log.DebugFormat("filename != null");
                if (log.IsDebugEnabled) log.Debug("Calling UploadContext.Current.CreateUploadedFile(" + controlID + "...)");
                UploadContext tempUploadContext = new UploadContext();
                tempUploadContext._ContentLength = origContentLength;
                UploadedFile uploadedFile = UploadStorage.CreateUploadedFile(tempUploadContext, controlID, fileName, contentType, storageConfig);
                UploadState.Files.Add(controlID, uploadedFile);
                if (MultiRequestControlID == null)
                    RegisterFilesForDisposal(controlID);
                outputStream = fileStream = uploadedFile.CreateStream();
                readPos = parsePos; // Skip past the boundary and headers
                // If the client-specified content length is too large, we set the status to
                // RejectedRequestTooLarge so that progress displays will stop. We do this after
                // having created the UploadedFile because that is necessary for the progress display
                // to find the uploadContext.
                if (origContentLength > UploadHttpModule.MaxRequestLength)
                {
                    if (log.IsDebugEnabled) log.Debug("contentLength > MaxRequestLength");
                    throw new UploadTooLargeException(UploadHttpModule.MaxRequestLength, origContentLength);
                }
                // Write out a replacement part that just contains the filename as the value.
                WriteReplacementFormField(controlID, fileName);
            }
            else
            {
                if (log.IsDebugEnabled) log.DebugFormat("filename == null");
                // Since filename==null this must just be a hidden field with a name that
                // looks like a file field. It is just an indication that when this request
                // ends, the associated uploaded files should be disposed.
                if (MultiRequestControlID == null)
                {
                    if (log.IsDebugEnabled) log.DebugFormat("MultiRequestControlID == null");
                    RegisterFilesForDisposal(controlID);
                }
                outputStream = preloadedEntityBodyStream;
            }
        }
        else
        {
            // Ordinary form field: pass it through to the replacement body.
            outputStream = preloadedEntityBodyStream;
        }
    }
    if (log.IsDebugEnabled) log.Debug("Done parsing.");
    outputStream.WriteByte(10);
    outputStream.Close();
    preloadedEntityBody = preloadedEntityBodyStream.ToArray();
    preloadedEntityBodyStream = null;
    // Fewer bytes than Content-Length means either the client aborted or the
    // connection stalled; report which with a 400.
    if (grandTotalBytesRead < origContentLength)
    {
        bool isClientConnected = false;
        try
        {
            isClientConnected = OrigWorker.IsClientConnected();
        }
        catch (Exception)
        {
            // Mono throws an exception if the client is no longer connected.
        }
        if (isClientConnected)
        {
            throw new HttpException(
                400,
                String.Format("Data length ({0}) is shorter than Content-Length ({1}) and client is still connected after {2} secs.",
                    grandTotalBytesRead, origContentLength, Math.Round(UploadState.TimeElapsed.TotalSeconds)));
        }
        else
        {
            throw new HttpException(
                400,
                String.Format("Client disconnected after receiving {0} of {1} bytes in {2} secs -- user probably cancelled upload.",
                    grandTotalBytesRead, origContentLength, Math.Round(UploadState.TimeElapsed.TotalSeconds)));
        }
    }
}
/// <summary>
/// Begins tracking a transfer (method name is obfuscator-generated): records
/// the start time and expected length, and moves to ReceivingData.
/// </summary>
/// <param name="contentLength">Total number of bytes expected.</param>
internal void x98532580ab8a33a1(long contentLength)
{
    // NOTE(review): DateTime.Now is local wall-clock time; if _start feeds
    // elapsed-time math, DateTime.UtcNow would be safer against DST shifts.
    this._start = DateTime.Now;
    this._contentLength = contentLength;
    this._state = UploadState.ReceivingData;
}
/// <summary>
/// Uploads one file to S3 with up to 10 retries; a final failure is logged as
/// a warning rather than propagated, so one bad file does not abort the batch.
/// </summary>
protected void UploadFile(AmazonS3Client Client, string BucketName, FileReference File, long Length, string Key, SemaphoreSlim Semaphore, UploadState State)
{
    try
    {
        Retry(() => UploadFileInner(Client, BucketName, File, Length, Key, Semaphore, State), 10);
    }
    catch (Exception Ex)
    {
        // Deliberate best-effort: record the failure and keep going.
        Log.TraceWarning("Exception trying to upload {0}: {1}", File, ExceptionUtils.FormatExceptionDetails(Ex));
    }
}
/// <summary>
/// Applies a state and termination reason with no error message.
/// </summary>
internal void SetState(UploadState state, UploadTerminationReason reason)
{
    // Delegate to the full overload, passing null for the message.
    SetState(state, reason, null);
}
/// <summary>
/// Uploads one file to S3 under the concurrency limit imposed by Semaphore,
/// then updates the shared counters in State (when provided).
/// </summary>
protected void UploadFileInner(AmazonS3Client Client, string BucketName, FileReference File, long Length, string Key, SemaphoreSlim Semaphore, UploadState State)
{
    // Acquire the slot BEFORE entering the try block: in the original code
    // Wait() was inside the try, so if Wait() threw (e.g. ObjectDisposed),
    // the finally would Release() a slot that was never acquired and corrupt
    // the semaphore's count.
    Semaphore.Wait();
    try
    {
        // Skip files that disappeared between enumeration and upload.
        if (FileReference.Exists(File))
        {
            PutObjectRequest Request = new PutObjectRequest();
            Request.BucketName = BucketName;
            Request.Key = Key;
            Request.FilePath = File.FullName;
            Request.CannedACL = S3CannedACL.NoACL;
            Client.PutObject(Request);
        }
        // NOTE(review): counters are bumped even when the file was missing and
        // nothing was uploaded — behavior preserved from the original.
        if (State != null)
        {
            Interlocked.Increment(ref State.NumFiles);
            Interlocked.Add(ref State.NumBytes, Length);
        }
    }
    finally
    {
        Semaphore.Release();
    }
}
/// <summary>
/// Records the captured errors, then delegates to the reason/message overload.
/// </summary>
internal void SetState(UploadState state, UploadTerminationReason reason, Exception[] errors, string errorMessage)
{
    _errors = errors;
    SetState(state, reason, errorMessage);
}
/// <summary>
/// Saves the posted file under serverLocation, optionally renamed. In
/// non-strict mode an existing name is resolved by generating a fresh one;
/// in strict mode the chosen name is used as-is. Any exception yields
/// NOTUPLOADED.
/// </summary>
/// <param name="serverLocation">Target directory path (filename is appended).</param>
/// <param name="RenameFile">Replacement filename, or empty to keep the original.</param>
/// <param name="strict">True to skip the duplicate-name check.</param>
/// <returns>The resulting upload state.</returns>
private UploadState SaveFile(string serverLocation, string RenameFile, bool strict)
{
    try
    {
        if (!this.HasFile)
        {
            return UploadState.NOTUPLOADED;
        }
        if (!AllowExtension())
        {
            return UploadState.NOTALLOWEXTENSION;
        }

        // Name to upload under: the caller-supplied rename, or the original.
        string fileName = RenameFile.Equals(string.Empty) ? this.FileName : RenameFile;

        UploadState uploadState;
        if (strict)
        {
            // Strict mode: always use the chosen name without checking duplicates.
            uploadState = UploadState.UPLOADED;
        }
        else
        {
            // Probe the server path for an existing file and, if needed, derive
            // a new non-clashing name.
            string pathToCheck = serverLocation + fileName;
            string fileNameBefore = fileName;
            fileName = GetNewFileName(serverLocation, fileName, pathToCheck);

            if (fileNameBefore.Equals(fileName))
            {
                uploadState = UploadState.UPLOADED;
            }
            else
            {
                // The file was renamed to avoid a clash; expose the new name.
                uploadState = UploadState.UPLOADEDWITHOTHERNAME;
                this.RenamedFileName = fileName;
            }
        }

        // Persist the file into the target directory.
        this.SaveAs(serverLocation + fileName);
        return uploadState;
    }
    catch
    {
        // Any I/O or save failure is reported as a plain "not uploaded".
        return UploadState.NOTUPLOADED;
    }
}
/// <summary>
/// Sets the state on the progress record identified by the request's
/// "UploadID" query-string parameter, if such a record exists.
/// </summary>
/// <param name="app">The current application instance.</param>
/// <param name="state">The state to record on the progress entry.</param>
void SetUploadState(HttpApplication app, UploadState state)
{
    string uploadId = app.Request.QueryString["UploadID"];
    if (string.IsNullOrEmpty(uploadId))
    {
        return;
    }

    Progress progress = GetProgress(uploadId, app.Application);
    if (progress == null)
    {
        return;
    }

    progress.SetState(state);
    // Write the updated record back into application state.
    SetProgress(uploadId, progress, app.Application);
}
/// <summary>
/// Maps an upload state to its user-facing message text (including the
/// not-logged-in case).
/// </summary>
private string GetStateMessage(UploadState state)
{
    if (state == UploadState.Success)
        return "SUCCESS";
    if (state == UploadState.FileAccessError)
        return "文件访问出错,请检查写入权限";
    if (state == UploadState.SizeLimitExceed)
        return "文件大小超出服务器限制";
    if (state == UploadState.TypeNotAllow)
        return "不允许的文件格式";
    if (state == UploadState.NetworkError)
        return "网络错误";
    if (state == UploadState.NotLogin)
        return "您未登录,请先登录";

    // Any unrecognized state falls through to a generic "unknown error".
    return "未知错误";
}
/// <summary>
/// Marks the upload complete, notifies the client, and completes any pending
/// inventory create/update or task-item update that was waiting on this asset.
/// </summary>
protected void SendCompleteMessage()
{
    // We must lock in order to avoid a race with a separate thread dealing with an inventory item or create
    // message from other client UDP.
    lock (this)
    {
        m_uploadState = UploadState.Complete;

        ourClient.SendAssetUploadCompleteMessage(m_asset.Type, true, m_asset.FullID);

        // At most one follow-up runs; the flags are ordered by this chain.
        if (m_createItem)
        {
            CompleteCreateItem(m_createItemCallback);
        }
        else if (m_updateItem)
        {
            CompleteItemUpdate(m_updateItemData);
        }
        else if (m_updateTaskItem)
        {
            CompleteTaskItemUpdate(m_updateTaskItemData);
        }
        // else if (m_storeLocal)
        // {
        //     m_Scene.AssetService.Store(m_asset);
        // }
    }

    m_log.DebugFormat(
        "[ASSET XFER UPLOADER]: Uploaded asset {0} for transaction {1}",
        m_asset.FullID, m_transactionID);

    // Optional debugging aid: dump the raw asset bytes to a timestamped file.
    if (m_dumpAssetToFile)
    {
        DateTime now = DateTime.Now;
        string filename =
            String.Format("{6}_{7}_{0:d2}{1:d2}{2:d2}_{3:d2}{4:d2}{5:d2}.dat",
                now.Year, now.Month, now.Day, now.Hour, now.Minute,
                now.Second, m_asset.Name, m_asset.Type);
        SaveAssetToFile(filename, m_asset.Data);
    }
}
/// <summary>
/// Drives the publish pipeline: (re)generate HTML output for the project (or
/// per-document MD5 output when a document list is supplied), zip the result,
/// then upload the archive, updating the busy indicator throughout.
/// NOTE(review): async void means callers cannot await this or observe its
/// exceptions — acceptable only if invoked as a UI command/event handler.
/// </summary>
/// <param name="para">Optional List of IDocumentService; null selects the single-project path.</param>
private async void UploadServer(object para)
{
    List<IDocumentService> DocArray = para as List<IDocumentService>;
    _busyIndicator.Progress = 5;
    _busyIndicator.Content = @"begin to project info...5%";

    #region generate html
    //load page data and create all images
    if (DocArray == null)
    {
        _uploadState = UploadState.upload_Generate;
        await AsyncConvertAllPagesForUpload(5, 30);
    }
    _procesVM.IsCloseEnable = false;
    _busyIndicator.Progress = 35;
    _busyIndicator.Content = @"Generate the HTML Page...35%";
    //await Task.Factory.StartNew(GenerateHtml);
    bool bIsSuccessful = false;
    if (DocArray == null)
    {
        bIsSuccessful = await Task.Factory.StartNew<bool>(GenerateHtml);
    }
    else
    {
        _outputFolder = _outputFolder + @"\data";
        //foreach (IDocumentService doc in DocArray)
        //{
        //    await AsyncConvertAllPages(doc.Document);
        //}
        int i = 0;
        // One numbered output subfolder (0, 1, 2, ...) per document.
        foreach (IDocumentService doc in DocArray)
        {
            await AsyncConvertAllPages(doc.Document);
            string szLocation = _outputFolder + @"\" + i++;
            bIsSuccessful = await Task.Factory.StartNew<bool>(() => GenerateMD5Html(doc, szLocation));
            if (bIsSuccessful == false)
            {
                _busyIndicator.IsShow = false;
                _htmlService.IsHtmlGenerating = false;
                break;
            }
            _busyIndicator.Progress = _busyIndicator.Progress + 3;
        }
        bool bSuccessful = await Task.Factory.StartNew<bool>(() => GenerateMD5DifferInfo(DocArray, _outputFolder));
        if (bSuccessful == false)
        {
            _busyIndicator.IsShow = false;
            _htmlService.IsHtmlGenerating = false;
            MessageBox.Show(GlobalData.FindResource("Error_Generate_Html_Access"));
            return;
        }
    }
    _htmlService.ImagesStreamManager.WorkingDirectory = string.Empty;
    if (!bIsSuccessful)
    {
        string errorMessage = @"Generate Html failed !";
        UploadFailed(errorMessage, string.Empty);
        return;
    }
    #endregion

    #region ZIP File
    _busyIndicator.Progress = 40;
    _busyIndicator.Content = @"Zip file...40%";
    string upLoadPath = _upLoadPara.ProjectPath + @"Upload.zip";
    await ZIPFiles(_upLoadPara.ProjectPath, upLoadPath);
    #endregion

    #region Publish
    _busyIndicator.Progress = 50;
    //now enable Hide window
    _procesVM.IsCloseEnable = true;
    if (_procesWindow != null)
    {
        _procesWindow.IsAllowClose = true;
    }
    _uploadState = UploadState.upload_Update;
    _busyIndicator.Content = @"Publish File...50%";
    // FileInfo
    if (CheckFileInfo(upLoadPath))
    {
        if (_upLoadPara.IsNewProject)//Create new
        {
            await HttpUploadFiles(upLoadPath, false);
        }
        else
        {
            await HttpUploadFiles(upLoadPath, true, _upLoadPara.id);
        }
        Naver.Compass.Common.CommonBase.NLogger.Debug("Project publish end,Path->" + _upLoadPara.ProjectPath + ";GUID->" + _upLoadPara.DocGUID);
    }
    else
    {
        UploadFailed(GlobalData.FindResource("Publish_File_Size_Error"), upLoadPath);
    }
    #endregion
}