Example #1
        private void SendIndex(long taskId, Session s /*, int budget*/)
        {
            System.Threading.Tasks.Task consumeIndexTask = System.Threading.Tasks.Task.Factory.StartNew(() => {
                DataPipeline pipeline = new DataPipeline(PipelineMode.Write, DataProcessingFlags.CCompress | DataProcessingFlags.CChecksum);
                //pipeline.Init();
                ChunkProcessor cp = new ChunkProcessor(s, pipeline, backup);
                cp.Process(indexChunk, backup.MaxChunkSize * 10);
                indexChunk.Size = pipeline.Stream.Length;
                indexChunk.AddDestination(s.ClientId);
            }, TaskCreationOptions.LongRunning);

            consumeIndexTask.ContinueWith(o => {
                Logger.Append(Severity.INFO, "Processed and sent backup index");
                backup.AddHubNotificationEvent(705, Math.Round((double)indexChunk.Size / 1024 / 1024, 1).ToString(), "");

                string synthIndexSum = indexChunk.Sum;                 // for Fulls
                if (backup.Bs.ScheduleTimes[0].Level != P2PBackup.Common.BackupLevel.Full)
                {
                    IndexManager idxManager = new IndexManager();
                    Logger.Append(Severity.INFO, "Building synthetic full index...");
                    idxManager.CreateSyntheticFullIndex(backup.RefTaskId, taskId, backup.RootDrives);
                    backup.AddHubNotificationEvent(707, "", "");
                    synthIndexSum = IndexManager.CheckSumIndex(taskId, false);                     // for synthetic backups
                }

                User.SendDoneBackup(taskId, backup.OriginalSize, backup.FinalSize, backup.TotalItems, indexChunk.Name, indexChunk.Sum, synthIndexSum, indexChunk.StorageDestinations, 100);
                Logger.Append(Severity.INFO, "Task " + taskId + " has finished. " + backup.TotalItems + " items, " + backup.TotalChunks + " chunks. Original data size=" + Math.Round((double)backup.OriginalSize / 1024 / 1024, 1) + "MB, final=" + Math.Round((double)backup.FinalSize / 1024 / 1024, 1) + "MB");
                string statsByKind = "Task " + taskId + " processed: ";
                for (int i = 0; i < 10; i++)
                {
                    statsByKind += backup.ItemsByType[i] + " " + ((FileType)i).ToString() + ", ";
                }
                Logger.Append(Severity.INFO, statsByKind);
#if DEBUG
                Logger.Append(Severity.INFO, "DataProcessorStreams statistics : checksum=" + BenchmarkStats.Instance().ChecksumTime
                              + "ms, dedup=" + BenchmarkStats.Instance().DedupTime
                              + "ms, compress=" + BenchmarkStats.Instance().CompressTime
                              + "ms, send=" + BenchmarkStats.Instance().SendTime + "ms.");
                Logger.Append(Severity.INFO, "Dedup statistics : lookups=" + BenchmarkStats.Instance().DedupLookups
                              + ", hotfound=" + BenchmarkStats.Instance().DedupHotFound
                              + ", coldfound=" + BenchmarkStats.Instance().DedupColdFound
                              + ", add=" + BenchmarkStats.Instance().DedupAdd + ".");
#endif
                User.StorageSessionReceivedEvent -= new User.StorageSessionReceivedHandler(this.SendIndex);

                //Console.WriteLine("IndexSessionReceived() : backup typre="+backup. .BackupTimes[0].Type);

                backup.AddHubNotificationEvent(706, "", "");
                backup.Terminate(true);

                BackupDoneEvent(taskId);
            }, TaskContinuationOptions.OnlyOnRanToCompletion | TaskContinuationOptions.ExecuteSynchronously
                                          | TaskContinuationOptions.NotOnFaulted | TaskContinuationOptions.NotOnCanceled);

            //consumeTask.Dispose();
        }
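For illustration, a hedged sketch of an error-path continuation that could be attached to consumeIndexTask inside SendIndex (this handler is not part of the example above; the OnlyOnFaulted branch, its log text, and the Terminate(false) call are assumptions, reusing members shown in these examples):

            consumeIndexTask.ContinueWith(t => {
                // Hypothetical error path: log the failed index transfer and notify the hub
                // (811 is the error event code used in the other examples on this page).
                Logger.Append(Severity.ERROR, "Could not process and send backup index: " + t.Exception.GetBaseException().Message);
                backup.AddHubNotificationEvent(811, indexChunk.Name, t.Exception.GetBaseException().Message);
                backup.Terminate(false);                 // assumption: false signals an unsuccessful end of task
            }, TaskContinuationOptions.OnlyOnFaulted);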
Example #2
        internal void SendIndex(Session s)
        {
            // Terminate and free Dedup DB before saving it to ensure all its content has been flushed to disk
            dedupIndex.Dispose();
            System.Threading.Tasks.Task consumeIndexTask = System.Threading.Tasks.Task.Factory.StartNew(() => {
                DataPipeline pipeline = new DataPipeline(PipelineMode.Write, DataProcessingFlags.CCompress | DataProcessingFlags.CChecksum);
                ChunkProcessor cp     = new ChunkProcessor(s, pipeline, backup, new CancellationToken());
                cp.Process(indexChunk, backup.BackupSet.MaxChunkSize * 10);
                indexChunk.Size = pipeline.Stream.Length;
                indexChunk.AddDestination(s.ToNode.Id);
            }, TaskCreationOptions.LongRunning);

            consumeIndexTask.ContinueWith(o => {
                Logger.Append(Severity.INFO, "Processed and sent backup index");
                backup.AddHubNotificationEvent(705, Math.Round((double)indexChunk.Size / 1024 / 1024, 1).ToString(), "");
                // set final info : index location
                backup.IndexStorageNodes = indexChunk.StorageDestinations;

                Logger.Append(Severity.INFO, "Task " + s.TaskId + " has finished. " + backup.TotalItems + " items, " + backup.TotalChunks + " chunks. Original data size=" + Math.Round((double)backup.OriginalSize / 1024 / 1024, 1) + "MB, final=" + Math.Round((double)backup.FinalSize / 1024 / 1024, 1) + "MB");
                string statsByKind = "Task " + s.TaskId + " processed: ";
                for (int i = 0; i < 10; i++)
                {
                    statsByKind += backup.ItemsByType[i] + " " + ((FileType)i).ToString() + ", ";
                }
                Logger.Append(Severity.INFO, statsByKind);

#if DEBUG
                Logger.Append(Severity.INFO, "DataProcessorStreams statistics : checksum=" + BenchmarkStats.Instance().ChecksumTime
                              + "ms, dedup=" + BenchmarkStats.Instance().DedupTime
                              + "ms, compress=" + BenchmarkStats.Instance().CompressTime
                              + "ms, send=" + BenchmarkStats.Instance().SendTime + "ms.");
                Logger.Append(Severity.INFO, "Dedup statistics : lookups=" + BenchmarkStats.Instance().DedupLookups
                              + ", hotfound=" + BenchmarkStats.Instance().DedupHotFound
                              + ", coldfound=" + BenchmarkStats.Instance().DedupColdFound
                              + ", add=" + BenchmarkStats.Instance().DedupAdd + ".");
#endif

                //User.SessionReady -= new User.StorageSessionReceivedHandler(this.SendIndex);

                backup.AddHubNotificationEvent(706, "", "");
                backup.Terminate(true);

                BackupDoneEvent(backup);
            }, TaskContinuationOptions.OnlyOnRanToCompletion
                                          | TaskContinuationOptions.ExecuteSynchronously
                                          | TaskContinuationOptions.NotOnFaulted
                                          | TaskContinuationOptions.NotOnCanceled
                                          );
            //consumeTask.Dispose();
        }
Example #3
        public bool Delete(int id)
        {
            //if (_context == null)
            var dbController = _appManager.DataController;                     //.Instance(appId: appId);

            var canDeleteResult = (dbController.Entities.CanDeleteEntity(id)); // _context.EntCommands.CanDeleteEntity(id);

            if (!canDeleteResult.Item1)
            {
                throw new Exception(canDeleteResult.Item2);
            }


            // Get the Entity describing the Pipeline and Pipeline Parts (DataSources)
            // var source = DataSource.GetInitialDataSource(appId: appId);
            var pipelineEntity = DataPipeline.GetPipelineEntity(id, _appManager.Cache);
            var dataSources    = DataPipeline.GetPipelineParts(_appManager.ZoneId, _appManager.AppId, pipelineEntity.EntityGuid);
            var metaDataSource = DataSource.GetMetaDataSource(appId: _appManager.AppId);

            // Delete Pipeline Parts
            foreach (var dataSource in dataSources)
            {
                // Delete Configuration Entities (if any)
                var dataSourceConfig = metaDataSource.GetAssignedEntities(Constants.MetadataForEntity /* .AssignmentObjectTypeEntity*/, dataSource.EntityGuid).FirstOrDefault();
                if (dataSourceConfig != null)
                {
                    dbController.Entities.DeleteEntity(dataSourceConfig.EntityId);
                }

                dbController.Entities.DeleteEntity(dataSource.EntityId);
            }

            // Delete Pipeline
            dbController.Entities.DeleteEntity(id);

            // flush cache
            SystemManager.Purge(_appManager.AppId);

            return true;
        }
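A hedged usage sketch of the Delete method above (the pipelines variable holding the owning instance is an assumption; the example only shows that a blocked delete surfaces the CanDeleteEntity message as an exception):

            // Hypothetical caller: try to delete pipeline entity 42 and report why it was refused, if it was.
            try
            {
                pipelines.Delete(42);
            }
            catch (Exception ex)
            {
                // CanDeleteEntity vetoed the delete; ex.Message carries the reason (Item2 of the check result).
                Console.WriteLine("Cannot delete pipeline: " + ex.Message);
            }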
Example #4
 public PassThruTargetAdapter(DataPipeline <TRow> .RowDelegate onProcess)
 {
     OnProcess = onProcess;
 }
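A hedged usage sketch (MyRow and the exact shape of DataPipeline&lt;TRow&gt;.RowDelegate are assumptions; the constructor above only shows that the delegate is stored in OnProcess):

     // Hypothetical: a pass-through target that counts rows without modifying them,
     // assuming RowDelegate is compatible with a single-row handler.
     int rowCount = 0;
     var target = new PassThruTargetAdapter<MyRow>(row => { rowCount++; });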
Example #5
        private void Consume(Session s /*, int budget*/)
        {
            // Filter client-side processing flags
            DataProcessingFlags clientFlags = DataProcessingFlags.None;

            foreach (DataProcessingFlags flag in Enum.GetValues(typeof(DataProcessingFlags)))
            {
                if ((int)flag < 512 && backup.DataFlags.HasFlag(flag))
                {
                    clientFlags |= flag;
                }
            }

            DataPipeline pipeline = new DataPipeline(PipelineMode.Write, clientFlags, this.backup.Bs.Id);

            if (backup.DataFlags.HasFlag(DataProcessingFlags.CDedup))
            {
                pipeline.StorageNode = s.ClientId;
            }
            if (backup.DataFlags.HasFlag(DataProcessingFlags.CEncrypt))
            {
                //X509Certificate2 cert = new X509Certificate2(ConfigManager.GetValue("Security.CertificateFile"), "");
                //pipeline.CryptoKey = (RSACryptoServiceProvider)cert.PublicKey.Key;
                pipeline.CryptoKey = s.CryptoKey;

                byte[] iv = new byte[16];
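                // Build a deterministic 16-byte IV by writing the task id into both halves
                // (this assumes backup.TaskId is a 64-bit value, since 8 bytes are copied each time).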
                Array.Copy(System.BitConverter.GetBytes(backup.TaskId), iv, 8);
                Array.Copy(System.BitConverter.GetBytes(backup.TaskId), 0, iv, 8, 8);
                pipeline.IV = iv;                 //new byte[]{Convert.ToByte(backup.TaskId)};
            }
            //pipeline.Init();
            ChunkProcessor cp = new ChunkProcessor(s, pipeline, backup);

            s.TransfertDoneEvent += new Session.TransfertDoneHandler(ManageChunkSent);

            // We transfer chunks until the budget is exhausted or there are no more chunks to send (backup done, or severe error)
            while ((!chunkBuilderFeed.IsCompleted) && (s.Budget > 0))
            {
                if (cancellationTokenSource.IsCancellationRequested)
                {
                    s.LoggerInstance.Log(Severity.INFO, "Received cancellation request for task #" + backup.TaskId + ", stop processing...");
                    s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
                    s.SendDisconnect();
                    s.Disconnect();
                    return;
                }
                BChunk chunk = null;
                try{
                    lock (processingChunks){
                        chunk = chunkBuilderFeed.Take(cancellationTokenSource.Token);
                        s.LoggerInstance.Log(Severity.DEBUG2, "Processing chunk " + chunk.Name);
                        processingChunks.Add(chunk);
                    }
                    cp.Process(chunk, backup.MaxChunkSize);

                    /*backup.OriginalSize += chunk.OriginalSize;
                     * backup.FinalSize += chunk.Size;
                     * backup.TotalChunks ++;
                     * backup.TotalItems += chunk.Files.Count;*/
                    //if(chunk.Size > pipeline.HeaderLength)// an empty chunk doesn't count
                    //		budget--;

                    /// TODO replace waitone with a cancellationtoken-aware impl : http://msdn.microsoft.com/en-us/library/ee191552.aspx
                    //if (chunk.SentEvent.WaitOne()){ // (60000, false)){
                    chunk.SentEvent.Wait(cancellationTokenSource.Token);
                    s.LoggerInstance.Log(Severity.DEBUG2, "Processed  chunk " + chunk.Name + ", remaining budget=" + s.Budget);
                    chunk.SentEvent.Dispose();

                    /*}
                     * else{ // timeout waiting for storage node confirmation
                     *      Logger.Append(Severity.WARNING, "Timeout waiting for storage node #"+s.ClientId+" ("+s.ClientIp+") confirmation, chunk "+chunk.Name);
                     *      // TODO : but if have an error with one chunk, it's likely we will have one with next chunks too.
                     *      //		close session now instead of continuing???
                     *      try{
                     *              chunkBuilderFeed.Add(chunk, cancellationTokenSource.Token);
                     *      }
                     *      catch(InvalidOperationException){
                     *              Logger.Append(Severity.ERROR, "Timeout waiting for storage node #"+s.ClientId+" : A session error occured, unable to use a new session to process chunk (queue is closed)");
                     *              backup.AddHubNotificationEvent(811, chunk.Name, "Timeout waiting for storage node #"+s.ClientId+" : A session error occured, unable to use a new session to process chunk (queue is closed)");
                     *      }
                     * }*/
                }
                catch (System.Net.Sockets.SocketException e) {
                    // Error sending to storage node: re-add the chunk to the queue and ask the hub for an alternate storage session.
                    if (chunk == null)
                    {
                        return;
                    }
                    Console.WriteLine("############## Consume() : send failed for chunk " + chunk.Name + ": " + e.Message + " ---- " + e.StackTrace);
                    backup.AddHubNotificationEvent(811, chunk.Name, e.Message);
                    RemoveChunk(chunk);
                    //s.Disconnect();
                    try{
                        User.AskAlternateDestination(backup.TaskId, s.ClientId);
                        chunkBuilderFeed.Add(chunk, cancellationTokenSource.Token);
                    }
                    catch (InvalidOperationException ioe) {
                        Logger.Append(Severity.ERROR, "A session error occured, unable to use a new session to process chunk (queue is closed) : " + ioe.Message);
                        backup.AddHubNotificationEvent(811, chunk.Name, "A session error occured, unable to use a new session to process chunk (queue is closed)");
                    }
                    //throw new Exception("Something went wrong with this consumer");
                }
                catch (OperationCanceledException) {
                    Logger.Append(Severity.DEBUG2, "Consumer task has been manually cancelled on purpose, stopping...");
                    s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
                    return;
                }

                /*Logger.Append(Severity.INFO, "DataProcessorStreams statistics : checksum="+BenchmarkStats.Instance().ChecksumTime
                 +"ms, dedup="+BenchmarkStats.Instance().DedupTime
                 +"ms, compress="+BenchmarkStats.Instance().CompressTime
                 +"ms, send="+BenchmarkStats.Instance().SendTime+"ms.");*/
            }
            s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
            Logger.Append(Severity.DEBUG, "Session with node #" + s.ClientId + ": processed and transferred all data chunks, unused budget=" + s.Budget);
        }
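For context, a hedged sketch of the producer side that this consumer loop expects (it is not shown in these examples; BuildChunks is a hypothetical helper, and chunkBuilderFeed appears to be a BlockingCollection&lt;BChunk&gt; given its IsCompleted/Take/Add usage):

            // Hypothetical producer: queue the chunks, then mark the feed complete so the
            // consumer's !chunkBuilderFeed.IsCompleted loop can finish draining and exit.
            foreach (BChunk chunk in BuildChunks())
            {
                chunkBuilderFeed.Add(chunk, cancellationTokenSource.Token);
            }
            chunkBuilderFeed.CompleteAdding();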
Example #6
        // TODO : split this into a dedicated consumer class, and also merge ChunkProcessor into that future class.
        // Using a class would make it possible to track what the consumer is doing, and to cancel it if it blocks on an empty chunksFeeds.Take (when everything has already been processed).
        private void Consume(Session s /*, int budget*/)
        {
            if (/*chunksFeeds.IsCompleted*/ megaQueue.Count == 0 && doneBdhProducers == 0 && chunksFeeds.Count == 0)
            {
                Console.WriteLine("------------------------- CONSUME() : procducers queues marked complete and already processed, exiting");
                return;
            }

            // Filter client-side processing flags
            DataProcessingFlags clientFlags = DataProcessingFlags.None;

            foreach (DataProcessingFlags flag in Enum.GetValues(typeof(DataProcessingFlags)))
            {
                if ((int)flag < 512 && backup.BackupSet.DataFlags.HasFlag(flag))
                {
                    clientFlags |= flag;
                }
            }

            DataPipeline pipeline = new DataPipeline(PipelineMode.Write, clientFlags, this.backup.BackupSet.Id, dedupIndex);

            if (backup.BackupSet.DataFlags.HasFlag(DataProcessingFlags.CDedup))
            {
                pipeline.StorageNode = s.ToNode.Id;
            }
            if (backup.BackupSet.DataFlags.HasFlag(DataProcessingFlags.CEncrypt))
            {
                //X509Certificate2 cert = new X509Certificate2(ConfigManager.GetValue("Security.CertificateFile"), "");
                //pipeline.CryptoKey = (RSACryptoServiceProvider)cert.PublicKey.Key;
                pipeline.CryptoKey = s.CryptoKey;

                byte[] iv = new byte[16];
                Array.Copy(System.BitConverter.GetBytes(backup.Id), iv, 8);
                Array.Copy(System.BitConverter.GetBytes(backup.Id), 0, iv, 8, 8);
                pipeline.IV = iv;                 //new byte[]{Convert.ToByte(backup.TaskId)};
            }
            if (chunksFeeds.Count == 0)
            {
                return;
            }
            ChunkProcessor cp = new ChunkProcessor(s, pipeline, backup, this.cancellationTokenSource.Token);

            s.TransfertDoneEvent += new Session.TransfertDoneHandler(ManageChunkSent);
            Console.WriteLine("------------------------- Consume(" + s.Id + ") : taking chunks feed from queue which has " + chunksFeeds.Count + " elements yet to processe");
            BlockingCollection <BChunk> myFeed = chunksFeeds.Take(this.cancellationTokenSource.Token);

            Console.WriteLine("------------------------- Consume(" + s.Id + ") : got a new queue to process, yummy!");

            // We transfer chunks until the budget is exhausted or there are no more chunks to send (backup done, or severe error)
            while ((!myFeed.IsCompleted) && (s.Budget > 0))
            {
                if (cancellationTokenSource.IsCancellationRequested)
                {
                    s.LoggerInstance.Log(Severity.INFO, "Received cancellation request for task #" + backup.Id + ", stop processing...");
                    s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
                    s.SendDisconnect();
                    s.Disconnect();
                    return;
                }
                BChunk chunk = null;
                try{
                    chunk = myFeed.Take(cancellationTokenSource.Token);

                    s.LoggerInstance.Log(Severity.TRIVIA, "Processing chunk " + chunk.Name);
                    lock (processingChunks){
                        processingChunks.Add(chunk);
                    }
                    cp.Process(chunk, backup.BackupSet.MaxChunkSize);

                    if (chunk.SentEvent.Wait(180000, cancellationTokenSource.Token))
                    {
                        s.LoggerInstance.Log(Severity.TRIVIA, "Processed  chunk " + chunk.Name + ", remaining budget=" + s.Budget);
                    }
                    else                      // timeout waiting for storage node confirmation
                    {
                        Logger.Append(Severity.WARNING, "Timeout (3 minutes) waiting for storage node #" + s.ToNode.Id + " (" + s.ToNode.IP + ") confirmation, chunk " + chunk.Name);
                        backup.AddHubNotificationEvent(811, chunk.Name, "Timeout waiting for storage node #" + s.ToNode.Id + " transfer confirmation");
                        // TODO : if we have an error with one chunk, it's likely we will have one with the next chunks too.
                        //		close session now instead of continuing???
                        try{
                            myFeed.Add(chunk, cancellationTokenSource.Token);
                        }
                        catch (InvalidOperationException) {
                            Logger.Append(Severity.ERROR, "Timeout waiting for storage node #" + s.ToNode.Id + " : A session error occured, unable to use a new session to process chunk (queue is closed)");
                            backup.AddHubNotificationEvent(811, chunk.Name, "Timeout waiting for storage node #" + s.ToNode.Id + " : A session error occured, unable to use a new session to process chunk (queue is closed)");
                        }
                    }
                    if (chunk.SentEvent != null)
                    {
                        chunk.SentEvent.Dispose();
                    }
                }
                catch (System.Net.Sockets.SocketException e) {
                    // Error sending to storage node: re-add the chunk to the feed and ask for an alternate storage session.
                    if (chunk == null)
                    {
                        return;
                    }
                    Console.WriteLine("############## Consume() : send failed for chunk " + chunk.Name + ": " + e.Message + " ---- " + e.StackTrace);
                    backup.AddHubNotificationEvent(811, chunk.Name, e.Message);
                    RemoveChunk(chunk);
                    try{
                        this.StorageNeeded(s, 1, false, true);
                        myFeed.Add(chunk, cancellationTokenSource.Token);
                    }
                    catch (InvalidOperationException ioe) {
                        Logger.Append(Severity.ERROR, "A session error occured, unable to use a new session to process chunk (queue is closed) : " + ioe.Message);
                        backup.AddHubNotificationEvent(811, chunk.Name, "A session error occured, unable to use a new session to process chunk (queue is closed)");
                    }
                }
                catch (OperationCanceledException) {
                    Logger.Append(Severity.TRIVIA, "Consumer task has been manually cancelled on purpose, stopping...");
                    s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
                    return;
                }
                catch (InvalidOperationException) {
                    Logger.Append(Severity.DEBUG, "Consumer task has done processing its chunks list");
                }
            }
            if (!myFeed.IsCompleted)
            {
                // Re-add this unfinished chunk list to the queue, to be processed by the next session.
                Console.WriteLine("    ----------  Consumer : feed not completed, re-adding to chunksFeeds");
                chunksFeeds.Add(myFeed, cancellationTokenSource.Token);
                Console.WriteLine("    ----------  Consumer : feed not completed, re-added, chunksFeeds count=" + chunksFeeds.Count);
            }
            s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
            Logger.Append(Severity.DEBUG, "Session #" + s.Id + " with node #" + s.ToNode.Id + ": processed and transferred all data chunks, unused budget=" + s.Budget);
        }
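Similarly, a hedged sketch of how the nested chunksFeeds collection used above might be filled (the producer side is not part of this example; BuildChunks is a hypothetical helper):

            // Hypothetical producer: publish a per-producer feed to the shared chunksFeeds queue,
            // fill it, then mark it complete so myFeed.IsCompleted can eventually become true.
            var myFeed = new System.Collections.Concurrent.BlockingCollection<BChunk>();
            chunksFeeds.Add(myFeed, cancellationTokenSource.Token);
            foreach (BChunk chunk in BuildChunks())
            {
                myFeed.Add(chunk, cancellationTokenSource.Token);
            }
            myFeed.CompleteAdding();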