/// <summary>
/// Builds an <c>ISync</c> synchronizer for the given account and container,
/// or returns <c>null</c> when the account has no sync location configured.
/// </summary>
/// <param name="account">Account holding the name, key and location of the remote store.</param>
/// <param name="containerName">Remote container the synchronizer targets.</param>
/// <param name="log">Logger handed through to the synchronizer.</param>
/// <param name="synchronizeDirection">Direction of sync; defaults to upload.</param>
/// <returns>A configured synchronizer, or <c>null</c> when <c>account.location == "None"</c>.</returns>
private static ISync CreateSyncForAccount(MetaDataService.AccountInfo account, string containerName, Logger log, SynchronizeDirection synchronizeDirection = SynchronizeDirection.Upload)
{
    // "None" means the account is local-only: there is nothing to synchronize with.
    if (account.location == "None")
        return null;

    LocationInfo locationInfo = new LocationInfo(
        account.accountName,
        account.accountKey,
        SyncFactory.GetSynchronizerType(account.location));

    return SyncFactory.Instance.CreateSynchronizer(locationInfo, containerName, log, synchronizeDirection);
}
/// <summary>
/// Creates this stream's synchronizer for the given direction, or sets it to
/// <c>null</c> when the account has no sync location. Secure streams get a
/// synchronizer that additionally carries AES key material from the ACL metadata.
/// </summary>
/// <param name="dir">Direction (upload/download) the synchronizer will operate in.</param>
protected void CreateSync(SynchronizeDirection dir)
{
    // "None" means the stream is local-only: no synchronizer is needed.
    if (account.location == "None")
    {
        synchronizer = null;
        return;
    }

    // Common to both branches (previously duplicated): location descriptor and
    // the remote container name derived from the stream id and sequence number.
    LocationInfo li = new LocationInfo(account.accountName, account.accountKey, SyncFactory.GetSynchronizerType(account.location));
    string containerName = streamid.ToString().Replace('/', '-').ToLower() + "-" + seq_num;

    if (streamtype == StreamFactory.StreamSecurityType.Secure)
    {
        // Secure streams pass the AES key and IV so chunks are encrypted at rest.
        synchronizer = SyncFactory.Instance.CreateSynchronizer(li, containerName, logger, dir, streamcompressiontype, this.StreamChunkSizeForUpload, this.StreamThreadPoolSize, EncryptionType.AES, acl_md.encKey, acl_md.IV);
    }
    else
    {
        synchronizer = SyncFactory.Instance.CreateSynchronizer(li, containerName, logger, dir, streamcompressiontype, this.StreamChunkSizeForUpload, this.StreamThreadPoolSize);
    }

    // Point the synchronizer at the local stream files (also previously duplicated).
    synchronizer.SetLocalSource(targetDir);
    synchronizer.SetIndexFileName(IndexFileName);
    synchronizer.SetDataFileName(DataLogFileName);
}
/// <summary>
/// Uploads a file to Azure blob storage as de-duplicated chunks: downloads the
/// remote chunk metadata (if any), chunks the local file, uploads only the
/// chunks that differ, commits the block list, and uploads the new chunk
/// metadata. Returns the SHA-1 hash of the serialized chunk metadata JSON,
/// or <c>null</c> on any failure (errors are logged, not rethrown).
/// </summary>
/// <param name="filePath">Path of the local file to upload; its file name becomes the blob name.</param>
/// <returns>SHA-1 of the chunk-metadata JSON on success; <c>null</c> on failure.</returns>
public byte[] UploadFileAsChunks(string filePath)
{
    string blobName;
    List<ChunkInfo> chunkList_cloud = new List<ChunkInfo>(); // chunks indexed by chunk-index (0, 1, 2, ...) as known to the cloud
    List<ChunkInfo> chunkList_local;                         // chunks indexed by chunk-index (0, 1, 2, ...) for the local file

    try
    {
        // First check if the chunk metadata blob exists on the server.
        if (logger != null) logger.Log("Start Synchronizer Check Blob Exists");
        blobName = Path.GetFileName(filePath);
        CloudBlockBlob chunkMDblockBlob = GetBlockBlobReference(ChunkMetadataBlobPrefix + blobName);
        bool blobExists = BlockBlobExists(chunkMDblockBlob);
        if (logger != null) logger.Log("End Synchronizer Check Blob Exists");

        if (blobExists)
        {
            if (logger != null) logger.Log("Start Synchronizer Fill Remote ChunkList");
            // Pull the remote chunk list into memory; also adopt the chunk size,
            // compression and encryption the remote copy was written with, so the
            // local chunking lines up with what is already stored.
            FileMD fileMD = JsonConvert.DeserializeObject<FileMD>(chunkMDblockBlob.DownloadText());
            StaticChunkSize = fileMD.StaticChunkSize;
            chunkList_cloud = fileMD.ChunkList;
            if (logger != null) logger.Log("End Synchronizer Fill Remote ChunkList");
            chunkCompressionType = SyncFactory.GetCompressionType(fileMD.compressionType);
            chunkEncryptionType = SyncFactory.GetEncryptionType(fileMD.encryptionType);
        }

        if (logger != null) logger.Log("Start Synchronizer Fill Local ChunkList");
        StaticChunk staticChunker = new StaticChunk(StaticChunkSize);
        // Build the local chunk list in memory (swap in any other IChunk implementation here).
        chunkList_local = staticChunker.GetCurrentChunkList(filePath);
        if (logger != null) logger.Log("End Synchronizer Fill Local ChunkList");

        // Diff local vs. cloud to find the chunks that actually need uploading.
        if (logger != null) logger.Log("Start Synchronizer ChunkList Compare");
        List<ChunkInfo> chunkList_toUpload = staticChunker.GetUploadChunkList(chunkList_local, chunkList_cloud);
        if (logger != null) logger.Log("End Synchronizer ChunkList Compare");

        if (logger != null) logger.Log("Start Synchronizer Upload Multiple Chunks");
        UploadChunkList(ref chunkList_toUpload, filePath, blobName);
        if (logger != null) logger.Log("End Synchronizer Upload Multiple Chunks");

        if (logger != null) logger.Log("Start Synchronizer Commit BlockList");
        // Commit the ordered block list, but only if we uploaded some chunks;
        // otherwise the existing committed block list is already correct.
        if (chunkList_toUpload.Count > 0)
        {
            CloudBlockBlob blockBlob = GetBlockBlobReference(blobName);
            List<int> blockIDCommitList = GetBlockIDList(ref chunkList_local, ref chunkList_cloud, ref chunkList_toUpload);
            long startt = DateTime.Now.Ticks;
            blockBlob.PutBlockList(EncodeBlockList(blockIDCommitList), GetBlobRequestOptions());
            long endt = DateTime.Now.Ticks;
            if (logger != null) logger.Log("PUTBLOCK LIST : " + (double)((double)(endt - startt) / (double)10000000));
        }
        if (logger != null) logger.Log("End Synchronizer Commit BlockList");

        // Upload the new local chunk list to the cloud.
        if (logger != null) logger.Log("Start Synchronizer ChunkList Upload");
        string json = JsonConvert.SerializeObject(
            new FileMD(StaticChunkSize, chunkList_local,
                       SyncFactory.GetCompressionTypeAsString(this.chunkCompressionType),
                       SyncFactory.GetEncryptionTypeAsString(this.chunkEncryptionType)),
            new KeyValuePairConverter());
        // Upload metadata only if we uploaded new chunks, or if this is a new
        // stream with no data yet (so the empty metadata blob gets created).
        if (chunkList_toUpload.Count > 0 || chunkList_local.Count == 0)
        {
            chunkMDblockBlob.UploadText(json);
        }

        byte[] ret;
        // SHA1CryptoServiceProvider is IDisposable; dispose it (was leaked before).
        using (SHA1 sha1 = new SHA1CryptoServiceProvider())
        {
            ret = sha1.ComputeHash(Encoding.ASCII.GetBytes(json));
        }
        if (logger != null) logger.Log("End Synchronizer ChunkList Upload");
        return ret;
    }
    catch (Exception e)
    {
        structuredLog("E", " . UploadFileAsChunks: " + e);
        return null;
    }
}
/// <summary>
/// Uploads a file to Amazon S3 as de-duplicated chunks: downloads the remote
/// chunk metadata object (if any), chunks the local file, uploads only the
/// chunks that differ, and uploads the new chunk metadata. Returns the SHA-1
/// hash of the serialized chunk metadata JSON, or <c>null</c> on any failure
/// (errors are logged, not rethrown).
/// </summary>
/// <param name="filePath">Path of the local file to upload; its file name becomes the S3 object name.</param>
/// <returns>SHA-1 of the chunk-metadata JSON on success; <c>null</c> on failure.</returns>
public byte[] UploadFileAsChunks(string filePath)
{
    string s3objectName;
    List<ChunkInfo> chunkList_cloud = new List<ChunkInfo>(); // chunks indexed by chunk-index (0, 1, 2, ...) as known to the cloud
    List<ChunkInfo> chunkList_local;                         // chunks indexed by chunk-index (0, 1, 2, ...) for the local file

    try
    {
        if (logger != null) logger.Log("Start Synchronizer Check Blob Exists");
        s3objectName = Path.GetFileName(filePath);
        bool s3ObjectExists = S3ObjectExists(ChunkMetadataObjectPrefix + s3objectName);
        if (logger != null) logger.Log("End Synchronizer Check Blob Exists");

        if (s3ObjectExists)
        {
            if (logger != null) logger.Log("Start Synchronizer Fill Remote ChunkList");
            GetObjectRequest request = new GetObjectRequest();
            request.WithBucketName(bucketName);
            request.WithKey(ChunkMetadataObjectPrefix + s3objectName);
            string chunkMD_JSON;
            // Dispose the response and reader so the underlying HTTP/response
            // stream is released (was leaked before).
            using (GetObjectResponse response = amazonS3Client.GetObject(request))
            using (StreamReader reader = new StreamReader(response.ResponseStream))
            {
                chunkMD_JSON = reader.ReadToEnd();
            }
            // Adopt the chunk size, compression and encryption the remote copy
            // was written with, so local chunking lines up with what is stored.
            FileMD fileMD = JsonConvert.DeserializeObject<FileMD>(chunkMD_JSON);
            StaticChunkSize = fileMD.StaticChunkSize;
            chunkList_cloud = fileMD.ChunkList;
            if (logger != null) logger.Log("End Synchronizer Fill Remote ChunkList");
            chunkCompressionType = SyncFactory.GetCompressionType(fileMD.compressionType);
            chunkEncryptionType = SyncFactory.GetEncryptionType(fileMD.encryptionType);
        }

        if (logger != null) logger.Log("Start Synchronizer Fill Local ChunkList");
        StaticChunk staticChunker = new StaticChunk(StaticChunkSize);
        // Build the local chunk list (swap in any other IChunk implementation here).
        chunkList_local = staticChunker.GetCurrentChunkList(filePath);
        if (logger != null) logger.Log("End Synchronizer Fill Local ChunkList");

        // Diff local vs. cloud to find the chunks that actually need uploading.
        if (logger != null) logger.Log("Start Synchronizer ChunkList Compare");
        List<ChunkInfo> chunkList_toUpload = staticChunker.GetUploadChunkList(chunkList_local, chunkList_cloud);
        if (logger != null) logger.Log("End Synchronizer ChunkList Compare");

        if (logger != null) logger.Log("Start Synchronizer Upload Multiple Chunks");
        UploadChunkList(ref chunkList_toUpload, filePath, s3objectName);
        if (logger != null) logger.Log("End Synchronizer Upload Multiple Chunks");

        if (logger != null) logger.Log("Start Synchronizer ChunkList Upload");
        string json = JsonConvert.SerializeObject(
            new FileMD(StaticChunkSize, chunkList_local,
                       SyncFactory.GetCompressionTypeAsString(this.chunkCompressionType),
                       SyncFactory.GetEncryptionTypeAsString(this.chunkEncryptionType)),
            new KeyValuePairConverter());
        // Upload the new chunk list only if we uploaded some new chunks.
        // NOTE(review): the Azure counterpart also uploads when
        // chunkList_local.Count == 0 (brand-new empty stream) — confirm whether
        // this S3 path should do the same.
        if (chunkList_toUpload.Count > 0)
        {
            UploadStringToS3Object(ChunkMetadataObjectPrefix + s3objectName, json);
        }

        byte[] ret;
        // SHA1CryptoServiceProvider is IDisposable; dispose it (was leaked before).
        using (SHA1 sha1 = new SHA1CryptoServiceProvider())
        {
            ret = sha1.ComputeHash(Encoding.ASCII.GetBytes(json));
        }
        if (logger != null) logger.Log("End Synchronizer ChunkList Upload");
        return ret;
    }
    catch (Exception e)
    {
        structuredLog("E", " . UploadFileAsChunks: " + e);
        return null;
    }
}