/// <summary>
/// Creates a read transfer item source with an injectable wait strategy.
/// Delegates to the full constructor with a retryAfter of -1.
/// NOTE(review): -1 presumably means "retry without limit" — confirm against
/// the retry loop that consumes this value.
/// </summary>
/// <param name="wait">Delay callback invoked between retries (injectable for tests).</param>
/// <param name="client">DS3 client used to poll the job.</param>
/// <param name="initialJobResponse">The bulk job whose blobs this source will read.</param>
public ReadTransferItemSource(Action<TimeSpan> wait, IDs3Client client, JobResponse initialJobResponse)
    : this(wait, client, -1, initialJobResponse)
{
}
/// <summary>
/// Creates a read transfer item source that waits by blocking the current
/// thread: the Thread.Sleep(TimeSpan) overload is used as the wait strategy.
/// </summary>
/// <param name="client">DS3 client used to poll the job.</param>
/// <param name="retryAfter">Retry budget forwarded to the full constructor.</param>
/// <param name="initialJobResponse">The bulk job whose blobs this source will read.</param>
public ReadTransferItemSource(IDs3Client client, int retryAfter, JobResponse initialJobResponse)
    : this(Thread.Sleep, client, retryAfter, initialJobResponse)
{
}
/// <summary>
/// Creates a write transfer item source with an injectable wait strategy.
/// </summary>
/// <param name="wait">Delay callback invoked when the source must pause (injectable for tests).</param>
/// <param name="client">DS3 client used for subsequent requests.</param>
/// <param name="jobResponse">The bulk job this source transfers items for.</param>
public WriteTransferItemSource(Action<TimeSpan> wait, IDs3Client client, JobResponse jobResponse)
{
    // Parameter names differ from the fields only by the underscore prefix,
    // so the explicit "this." qualifier is unnecessary.
    _wait = wait;
    _client = client;
    _jobResponse = jobResponse;
}
/// <summary>
/// Creates a read transfer item source for the given job, snapshotting the
/// job's blobs into the remaining-work set.
/// </summary>
/// <param name="wait">Delay callback invoked between retries (injectable for tests).</param>
/// <param name="client">DS3 client used to poll the job.</param>
/// <param name="initialJobResponse">The bulk job whose blobs this source will read.</param>
public ReadTransferItemSource(Action<TimeSpan> wait, IDs3Client client, JobResponse initialJobResponse)
{
    _wait = wait;
    _client = client;
    _jobId = initialJobResponse.JobId;
    // Track outstanding blobs by value in a set; items are removed as they complete.
    _blobsRemaining = new HashSet<Blob>(Blob.Convert(initialJobResponse));
}
/// <summary>
/// Uploads one job object (a single blob of a file) to the server: opens the
/// source file, wraps it in a stream windowed to the blob's offset/length,
/// and issues a PutObject request for the job.
/// </summary>
/// <param name="client">DS3 client to send the request through.</param>
/// <param name="obj">The blob to upload; supplies name, offset, and length.</param>
/// <param name="bulkResult">The bulk job; supplies bucket name and job id.</param>
private void PutObject(IDs3Client client, JobObject obj, JobResponse bulkResult)
{
    // BUG FIX: the original called fileToPut.Close() only after a successful
    // PutObject, leaking the file handle whenever the request threw. The
    // using statement disposes the stream on every exit path.
    using (var fileToPut = File.OpenRead(testDirectoryBigFolderForMaxBlob + obj.Name))
    {
        // Window the file stream to just this blob's [offset, offset+length) range.
        var contentStream = new PutObjectRequestStream(fileToPut, obj.Offset, obj.Length);
        var putObjectRequest = new PutObjectRequest(
            bulkResult.BucketName,
            obj.Name,
            bulkResult.JobId,
            obj.Offset,
            contentStream);
        client.PutObject(putObjectRequest);
    }
}
/// <summary>
/// Uploads, in parallel, every chunk of the job that appears in chunkIds,
/// skipping objects the server already has in cache. Each claimed chunk id
/// is removed from chunkIds so it is processed at most once.
/// </summary>
/// <param name="client">DS3 client used to upload each object.</param>
/// <param name="chunkIds">Outstanding chunk ids; claimed (removed) as chunks are processed. Mutated by this method.</param>
/// <param name="response">Job response whose ObjectLists enumerate the chunks to consider.</param>
/// <param name="bulkResult">The bulk job; forwarded to PutObject for bucket/job info.</param>
private void AsyncUpload(IDs3Client client, ICollection<Guid> chunkIds, JobResponse response, JobResponse bulkResult)
{
    // BUG FIX: chunkIds is shared across the parallel loop, but ICollection
    // implementations (HashSet, List, ...) are not thread-safe, and the
    // original's unsynchronized Contains-then-Remove was also a check-then-act
    // race (two threads could both pass Contains and upload the same chunk).
    // Remove() under a lock both tests membership and claims the chunk in one
    // atomic step (it returns false when the id was absent).
    var chunkIdsGate = new object();
    Parallel.ForEach(response.ObjectLists, chunk =>
    {
        bool claimed;
        lock (chunkIdsGate)
        {
            claimed = chunkIds.Remove(chunk.ChunkId);
        }
        if (!claimed) return;

        // it is possible that if we start resending a chunk, due to the program crashing, that
        // some objects will already be in cache. Check to make sure that they are not, and then
        // send the object to Spectra S3
        Parallel.ForEach(chunk, obj =>
        {
            if (obj.InCache) return;
            PutObject(client, obj, bulkResult);
        });
        Console.WriteLine();
    });
}
/// <summary>
/// Creates a write transfer item source that waits by blocking the current
/// thread: the Thread.Sleep(TimeSpan) overload is used as the wait strategy.
/// </summary>
/// <param name="client">DS3 client used for subsequent requests.</param>
/// <param name="jobResponse">The bulk job this source transfers items for.</param>
public WriteTransferItemSource(IDs3Client client, JobResponse jobResponse)
    : this(Thread.Sleep, client, jobResponse)
{
}