public UploadingFileViewModel(string targetPath, string filePath) : base()
{
    // Display name is the file's own name, independent of the target folder.
    Name = Path.GetFileName(filePath);

    // Hash the content first so the server can short-circuit uploads whose
    // content it already holds (hash-cache / "fast upload" path).
    var fileHash = ETag.ComputeEtag(filePath);
    Models.GenericResult<Models.UploadToken> tokenResult =
        fileSystem.UploadFile(Name, parentPath: targetPath, Hash: fileHash, OriginalFilename: Name);

    if (tokenResult.Result.HashCached)
    {
        // Identical content already on the server: no transfer needed.
        task = new HashCachedTask();
        return;
    }

    // Otherwise start a real upload task with the issued token and endpoint.
    task = EzWcs.EzWcs.NewTask(filePath,
                               tokenResult.Result.UploadInfo.Token,
                               tokenResult.Result.UploadInfo.UploadUrl);
}
static void Main(string[] args)
{
    Auth auth = EnvUtility.EnvAuth();
    Config config = EnvUtility.EnvConfig();

    // Demo target bucket and key.
    string bucket = "umu618-docs";
    string key = "U-1K.txt";

    // Remove any previous copy so the upload below starts clean.
    BucketManager bucketManager = new BucketManager(auth, config);
    HttpResult result = bucketManager.Delete(bucket, key);
    Console.WriteLine("---Delete---\n{0}", result.ToString());

    // Build the payload in memory: 1024 bytes of 'U' (ASCII 85).
    const long dataSize = 1024;
    byte[] payload = new byte[dataSize];
    for (int i = 0; i < dataSize; ++i)
    {
        payload[i] = 85;
    }

    // One-hour deadline, expressed as Unix time in milliseconds.
    long deadline = UnixTimestamp.GetUnixTimestamp(3600) * 1000;
    string putPolicy = "{\"scope\": \"" + bucket + "\",\"deadline\": \"" + deadline + "\"}";
    Console.WriteLine("----putPolicy----\n{0}", putPolicy);

    SimpleUpload uploader = new SimpleUpload(auth, config);
    result = uploader.UploadData(payload, putPolicy, key);
    Console.WriteLine("---UploadData---\n{0}", result.ToString());

    // On success the two hashes printed below should match.
    if ((int)HttpStatusCode.OK == result.Code)
    {
        Console.WriteLine("UploadResult: {0}", Base64.UrlSafeBase64Decode(result.Text));
    }
    Console.WriteLine("UploadData's ETag: {0}", ETag.ComputeEtag(payload));
}
// Resumes a paused/interrupted slice upload: re-runs every pending block job,
// then, if all blocks are done, composes the final file and verifies its hash.
private void CarryOn()
{
    // Re-drive each outstanding block job produced by GetJobInformation().
    foreach (JobInformation jobs in GetJobInformation())
    {
        try
        {
            if (jobs.BlockIndex == 0)
            {
                // Block 0 uses the dedicated first-block call; the returned
                // context string is stored for the final MakeFile step.
                jobs.UploadTask.TotalContents[jobs.BlockIndex] = UploadFirstBlock(jobs.Data, jobs.BlockIndex, jobs.UploadTask.Token, jobs.UploadTask.Address, jobs.UploadTask.UploadBatch, Path.GetFileName(jobs.UploadTask.FilePath));
                jobs.UploadTask.CompletedBlockCount++;
#if DEBUG
                Console.WriteLine("成功上传首个片");
#endif
            }
            else
            {
                // Non-first blocks go through the generic block upload.
                jobs.UploadTask.TotalContents[jobs.BlockIndex] = UploadBlock(jobs.Data, jobs.BlockIndex, jobs.UploadTask.Token, jobs.UploadTask.Address, jobs.UploadTask.UploadBatch, Path.GetFileName(jobs.UploadTask.FilePath));
                jobs.UploadTask.CompletedBlockCount++;
#if DEBUG
                Console.WriteLine($"成功上传第{jobs.BlockIndex}个片");
#endif
            }
        }
        catch (Exception)
        {
            // Deliberate best-effort: any failure pauses the task so it can be
            // resumed later, and the loop moves on to the next block.
            // NOTE(review): the exception is swallowed without logging — confirm
            // failures are surfaced elsewhere (e.g. via UploadTaskStatus observers).
            jobs.UploadTask.UploadTaskStatus = UploadTaskStatus.Pause;
        }
    }
    SliceUploadTask task = currentTask;
    if (task.CompletedBlockCount == task.TotalBlockCount)
    {
        // Bail out if any block context is still missing — the task is not
        // actually complete and MakeFile would fail.
        foreach (string content in task.TotalContents)
        {
            if (content == null || content == "")
            {
                return;
            }
        }
        // All contexts present: ask the server to compose the blocks into the file.
        // NOTE(review): result.Code is not checked before JObject.Parse below —
        // a non-JSON error body would throw here; confirm MakeFile throws on failure.
        HttpResult result = MakeFile(new FileInfo(task.FilePath).Length, Path.GetFileName(task.FilePath), task.TotalContents, task.Token, task.Address, task.UploadBatch);
#if DEBUG
        Console.WriteLine($"成功合成文件{task.FilePath}");
#endif
        JObject jo = JObject.Parse(result.Text);
        // Verify the server-side hash against a locally computed ETag of the file.
        if (jo["hash"].ToString() != ETag.ComputeEtag(task.FilePath))
        {
            task.UploadTaskStatus = UploadTaskStatus.Error;
#if DEBUG
            Console.WriteLine("上传校验失败");
#endif
        }
        else
        {
            task.Hash = jo["hash"].ToString();
            task.UploadTaskStatus = UploadTaskStatus.Completed;
        }
        return;
    }
    // NOTE(review): after the == branch returns, this only fires when the
    // completed count somehow exceeds the total — treated as a corrupt state.
    if (task.CompletedBlockCount >= task.TotalBlockCount)
    {
        task.UploadTaskStatus = UploadTaskStatus.Error;
    }
}
static void Main(string[] args)
{
    Auth auth = EnvUtility.EnvAuth();
    Config config = EnvUtility.EnvConfig();

    // Demo target bucket and key.
    string bucket = "umu618-docs";
    string key = "U-6M.txt";

    // Remove any previous copy so the upload below starts clean.
    BucketManager bucketManager = new BucketManager(auth, config);
    HttpResult result = bucketManager.Delete(bucket, key);
    Console.WriteLine("---Delete---\n{0}", result.ToString());

    // Build a 6 MiB payload in memory: 2M of 'U' (85), 2M of 'M' (77), 2M of 'U'.
    const long dataSize = 6 * 1024 * 1024;
    byte[] payload = new byte[dataSize];
    int pos = 0;
    for (; pos < dataSize / 3; ++pos)
    {
        payload[pos] = 85;
    }
    for (; pos < dataSize / 3 * 2; ++pos)
    {
        payload[pos] = 77;
    }
    for (; pos < dataSize; ++pos)
    {
        payload[pos] = 85;
    }

    // Hash printed now, to compare against the hash of the composed file later.
    Console.WriteLine("ETag of uploading data: {0}", ETag.ComputeEtag(payload));

    // One-hour deadline, expressed as Unix time in milliseconds.
    long deadline = UnixTimestamp.GetUnixTimestamp(3600) * 1000;
    string putPolicy = "{\"scope\": \"" + bucket + ":" + key + "\",\"deadline\": \"" + deadline + "\"}";
    Console.WriteLine("----putPolicy----\n{0}", putPolicy);
    string uploadToken = auth.CreateUploadToken(putPolicy);
    Console.WriteLine("----uploadToken----\n{0}", uploadToken);

    // Keep the very first chunk small: if something is wrong we find out
    // before spending bandwidth and time on a full block.
    const long blockSize = 4 * 1024 * 1024;
    const int firstChunkSize = 1024;

    SliceUpload uploader = new SliceUpload(auth, config);
    result = uploader.MakeBlock(blockSize, 0, payload, 0, firstChunkSize, uploadToken);
    Console.WriteLine("---MakeBlock---\n{0}", result.ToString());
    if ((int)HttpStatusCode.OK != result.Code)
    {
        return;
    }

    long blockCount = (dataSize + blockSize - 1) / blockSize;
    string[] contexts = new string[blockCount];
    JObject json = JObject.Parse(result.Text);
    contexts[0] = json["ctx"].ToString();

    // Upload the remainder of block 0 in one Bput call.
    result = uploader.Bput(contexts[0], firstChunkSize, payload, firstChunkSize, (int)(blockSize - firstChunkSize), uploadToken);
    Console.WriteLine("---Bput---\n{0}", result.ToString());
    if ((int)HttpStatusCode.OK != result.Code)
    {
        return;
    }
    json = JObject.Parse(result.Text);
    contexts[0] = json["ctx"].ToString();

    // Remaining blocks are each uploaded whole; the last one may be partial.
    for (int blockIndex = 1; blockIndex < blockCount; ++blockIndex)
    {
        long leftSize = dataSize - blockSize * blockIndex;
        int chunkSize = (int)(leftSize > blockSize ? blockSize : leftSize);
        result = uploader.MakeBlock(chunkSize, blockIndex, payload, (int)(blockSize * blockIndex), chunkSize, uploadToken);
        Console.WriteLine("---MakeBlock---\n{0}", result.ToString());
        if ((int)HttpStatusCode.OK == result.Code)
        {
            json = JObject.Parse(result.Text);
            contexts[blockIndex] = json["ctx"].ToString();
        }
        else
        {
            Console.WriteLine("----Exit with error----");
            return;
        }
    }

    // Compose the file; its ETag should match the one printed above.
    result = uploader.MakeFile(dataSize, key, contexts, uploadToken);
    Console.WriteLine("---MakeFile---\n{0}", result.ToString());
}
/// <summary>
/// Uploads the contents of <paramref name="stream"/> to the fixed bucket
/// "csj-zyk" under key <paramref name="file"/> using the slice-upload
/// protocol (MakeBlock / Bput per 4 MiB block, then MakeFile to compose).
/// Deletes any existing object under the same key first.
/// </summary>
/// <param name="stream">Source data; read from its current position.</param>
/// <param name="uploader">Slice-upload client used for all block calls.</param>
/// <param name="policy">Unused; kept for interface compatibility — the put
/// policy is built locally from the bucket, key and a one-hour deadline.</param>
/// <param name="file">Object key to store the data under.</param>
/// <returns>The <see cref="HttpResult"/> of the last upload call made
/// (MakeFile on full success, or the first failing block call).</returns>
public static HttpResult UploadStream(Stream stream, SliceUpload uploader, string policy, string file)
{
    Auth auth = EnvUtility.EnvAuth();
    Config config = EnvUtility.EnvConfig();

    // Fixed destination bucket; key comes from the caller.
    string bucket = "csj-zyk";
    string key = file;

    // Remove any previous copy so the upload starts clean.
    BucketManager bm = new BucketManager(auth, config);
    HttpResult result = bm.Delete(bucket, key);

    long dataSize = GetDataSize(stream.Length);
    byte[] data = new byte[dataSize];

    // FIX: Stream.Read may return fewer bytes than requested (CA2022); the
    // original single call could leave the tail of `data` zero-filled and
    // silently upload corrupt content. Loop until the buffer is full or the
    // stream ends early.
    int filled = 0;
    while (filled < data.Length)
    {
        int n = stream.Read(data, filled, data.Length - filled);
        if (n == 0)
        {
            break; // end of stream before the expected size — proceed with what we have
        }
        filled += n;
    }

    // Hash of the data being uploaded; compare against the composed file's hash.
    Console.WriteLine("ETag of uploading data: {0}", ETag.ComputeEtag(data));

    // One-hour deadline, expressed as Unix time in milliseconds.
    long deadline = UnixTimestamp.GetUnixTimestamp(3600) * 1000;
    string putPolicy = "{\"scope\": \"" + bucket + ":" + key + "\",\"deadline\": \"" + deadline + "\"}";
    string uploadToken = auth.CreateUploadToken(putPolicy);

    // Keep the very first chunk small: if something is wrong we find out
    // before spending bandwidth and time on a full block.
    const long blockSize = 4 * 1024 * 1024;
    const int firstChunkSize = 1024;

    SliceUpload su = uploader;
    result = su.MakeBlock(blockSize, 0, data, 0, firstChunkSize, uploadToken);
    if ((int)HttpStatusCode.OK == result.Code)
    {
        long blockCount = (dataSize + blockSize - 1) / blockSize;
        string[] contexts = new string[blockCount];
        JObject jo = JObject.Parse(result.Text);
        contexts[0] = jo["ctx"].ToString();

        // Upload the remainder of block 0.
        result = su.Bput(contexts[0], firstChunkSize, data, firstChunkSize, (int)(blockSize - firstChunkSize), uploadToken);
        if ((int)HttpStatusCode.OK == result.Code)
        {
            jo = JObject.Parse(result.Text);
            contexts[0] = jo["ctx"].ToString();

            // Remaining blocks are each uploaded whole; the last may be partial.
            for (int blockIndex = 1; blockIndex < blockCount; ++blockIndex)
            {
                long leftSize = dataSize - blockSize * blockIndex;
                int chunkSize = (int)(leftSize > blockSize ? blockSize : leftSize);
                result = su.MakeBlock(chunkSize, blockIndex, data, (int)(blockSize * blockIndex), chunkSize, uploadToken);
                if ((int)HttpStatusCode.OK == result.Code)
                {
                    jo = JObject.Parse(result.Text);
                    contexts[blockIndex] = jo["ctx"].ToString();
                }
                else
                {
                    // Abort on the first failed block; caller sees the failing result.
                    return result;
                }
            }

            // Compose the file from the collected contexts.
            result = su.MakeFile(dataSize, key, contexts, uploadToken);
        }
    }
    return result;
}