protected override void ProcessChunkOutput(OutputWrapper output, int index, int count, IEnumerable<DataPiece> pieces) {
    if (!output.HasRawStream) {
        throw new ArgumentException("Payload cannot be written to standard output");
    }

    var chunkQuery = new UploadDataQuery();
    chunkQuery.Package = new DataPackage {
        Pieces = pieces.ToList()
    };
    chunkQuery.SecretHash = Crypto.GenerateSecret().ToSha512Hash();

    // GZip compression is opt-in for raw output
    if (Parameters.GZip) {
        chunkQuery.Compression = CompressionPolicy.RequestGZip;
    }
    else {
        chunkQuery.Compression = CompressionPolicy.Disabled;
    }

    // Redirect the HTTP payload to the output's raw stream instead of the network
    chunkQuery.OverrideHttpClient = new HttpClient(new WriteOutHttpMessageHandler(output.RawStream));

    chunkQuery.Execute(CancellationToken.None).Wait();
}
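// WriteOutHttpMessageHandler is not defined in this file. Below is a minimal
// sketch of what such a handler might look like, assuming its role is to copy
// the serialized request payload to the supplied stream instead of sending it
// over the network. The class name suffix and all behavior here are assumptions,
// not the actual implementation (requires System.Net, System.Net.Http, System.IO,
// System.Threading, System.Threading.Tasks):
internal sealed class WriteOutHttpMessageHandlerSketch : HttpMessageHandler {

    private readonly Stream _output;

    public WriteOutHttpMessageHandlerSketch(Stream output) {
        _output = output ?? throw new ArgumentNullException(nameof(output));
    }

    protected override async Task<HttpResponseMessage> SendAsync(HttpRequestMessage request, CancellationToken cancellationToken) {
        if (request.Content != null) {
            // Write the request body to the destination stream; no network traffic is produced
            await request.Content.CopyToAsync(_output);
        }

        // Short-circuit with a synthetic response so the query completes normally
        return new HttpResponseMessage(HttpStatusCode.OK) {
            RequestMessage = request
        };
    }
}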
protected override void ProcessChunk(int index, int count, IEnumerable<DataPiece> pieces) {
    var query = new UploadDataQuery();
    query.Package = new DataPackage {
        Pieces = pieces.ToList()
    };
    query.SecretHash = Crypto.GenerateSecret().ToSha512Hash();

    // Compression is left at the query's default policy unless explicitly disabled
    if (Parameters.DisableCompression) {
        query.Compression = CompressionPolicy.Disabled;
    }
    query.Service = Parameters.EndPoint;

    query.Execute(CancellationToken.None).Wait();
}
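// The base class that invokes ProcessChunk/ProcessChunkOutput is not shown here.
// A hypothetical sketch of how it presumably drives these overrides, splitting
// the full data set into fixed-size chunks (ProcessAllChunks and its parameters
// are assumptions for illustration, not the actual base implementation):
private void ProcessAllChunks(IReadOnlyList<DataPiece> allPieces, int chunkSize) {
    // Total number of chunks, rounding up to cover a partial final chunk
    int count = (allPieces.Count + chunkSize - 1) / chunkSize;
    for (int index = 0; index < count; ++index) {
        var chunk = allPieces.Skip(index * chunkSize).Take(chunkSize);
        ProcessChunk(index, count, chunk);
    }
}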
/// <summary>
/// Inner worker synchronizer.
/// Per-track upload failures are logged and skipped, so normal sync operations do not throw.
/// </summary>
/// <exception cref="OperationCanceledException">Thrown when the sync is canceled.</exception>
private async Task<SyncResult> SynchronizeInner(CancellationToken token, SyncPolicy policy) {
    token.ThrowIfCancellationRequested();

    // Collect pending tracks whose backing data file still exists on disk
    IList<DatabaseQueries.TrackAndCount> tracks = await Task.Run(() => {
        using (var db = DatabaseUtility.OpenConnection()) {
            return (from t in db.GetAllPendingTracks()
                    let trackFilepath = FileNaming.GetDataTrackFilepath(t.TrackId)
                    where File.Exists(trackFilepath)
                    select t).ToList();
        }
    });

    if (tracks.Count == 0) {
        Log.Debug("No files to synchronize");
        return new SyncResult(0, 0);
    }

    token.ThrowIfCancellationRequested();

    Log.Debug("{0} tracks queued to synchronize", tracks.Count);
    Log.Event("Sync.start", new Dictionary<string, string>() {
        { "policy", policy.ToString() }
    });

    if (policy == SyncPolicy.ForceLast && tracks.Count > 1) {
        Log.Debug("Constraining upload to most recent file as per policy");
        tracks = new DatabaseQueries.TrackAndCount[] { tracks.First() };
    }

    int countUploadedPoints = 0;
    int countUploadedChunks = 0;

    foreach (var track in tracks) {
        token.ThrowIfCancellationRequested();

        int pendingPoints = track.DataCount - track.UploadedCount;
        Log.Debug("Uploading {0}/{1} points from track {2}", pendingPoints, track.DataCount, track.TrackId);

        try {
            // Skip over rows that were uploaded during a previous sync
            var reader = new DataReader(track.TrackId);
            if (!await reader.Skip(track.UploadedCount)) {
                Log.Error(null, "Cannot advance {0} rows in file for track {1}", track.UploadedCount, track.TrackId);
                continue;
            }

            int currentChunk = 0;
            while (pendingPoints > 0) {
                int chunkPoints = Math.Min(ChunkSize, pendingPoints);
                Log.Debug("Processing chunk {0} with {1} points", currentChunk + 1, chunkPoints);

                // Read the next chunk of points from the track's data file
                var package = new List<DataPiece>(chunkPoints);
                for (int p = 0; p < chunkPoints; ++p) {
                    if (!await reader.Advance()) {
                        throw new Exception(string.Format("Cannot read line for {0}th point", p + 1));
                    }

                    package.Add(new DataPiece {
                        TrackId = track.TrackId,
                        StartTimestamp = new DateTime(reader.Current.StartTicks),
                        EndTimestamp = new DateTime(reader.Current.EndTicks),
                        Ppe = reader.Current.Ppe,
                        PpeX = reader.Current.PpeX,
                        PpeY = reader.Current.PpeY,
                        PpeZ = reader.Current.PpeZ,
                        Latitude = reader.Current.Latitude,
                        Longitude = reader.Current.Longitude,
                        Bearing = reader.Current.Bearing,
                        Accuracy = reader.Current.Accuracy,
                        Vehicle = track.VehicleType,
                        Anchorage = track.AnchorageType,
                        NumberOfPeople = track.NumberOfPeople
                    });
                }

                // Each chunk is uploaded with a fresh secret; only its hash leaves the device
                var secret = Crypto.GenerateSecret();
                var secretHash = secret.ToSha512Hash();

                var uploadQuery = new UploadDataQuery {
                    Package = package,
                    SecretHash = secretHash
                };
                var response = await uploadQuery.Execute(token);

                Log.Debug("Points uploaded successfully, chunk {0} for track ID {1}", currentChunk + 1, track.TrackId);

                // Store record of uploaded chunk
                using (var db = DatabaseUtility.OpenConnection()) {
                    var record = new TrackUploadRecord {
                        TrackId = track.TrackId,
                        UploadedId = response.UploadedTrackId,
                        Secret = secret,
                        UploadedOn = DateTime.UtcNow,
                        Count = chunkPoints
                    };
                    db.Insert(record);
                }

                pendingPoints -= chunkPoints;
                currentChunk++;
                countUploadedPoints += chunkPoints;
                countUploadedChunks++;
            }
        }
        catch (IOException exIo) {
            Log.Error(exIo, "I/O failure reading file for track {0}", track.TrackId);
        }
        catch (Exception ex) {
            Log.Error(ex, "Failed while processing track {0}", track.TrackId);
        }
    }

    return new SyncResult(countUploadedPoints, countUploadedChunks);
}
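// Example call site (hypothetical; SynchronizeWithTimeout is not part of the
// original code): run a bounded sync and treat cancellation as an empty result.
// SyncPolicy.ForceLast is the only policy value visible in this file.
public async Task<SyncResult> SynchronizeWithTimeout() {
    using (var cts = new CancellationTokenSource(TimeSpan.FromMinutes(5))) {
        try {
            return await SynchronizeInner(cts.Token, SyncPolicy.ForceLast);
        }
        catch (OperationCanceledException) {
            Log.Debug("Sync canceled or timed out");
            return new SyncResult(0, 0);
        }
    }
}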