Example #1
        // Splits a big file (size > maxChunkSize) into multiple chunks.
        private List <BChunk> GetBigFileChunks(IFSEntry bigFile, long filePosInChunk)
        {
            long          pos       = 0;
            long          remaining = bigFile.FileSize;
            List <BChunk> chunks    = new List <BChunk>();

            while (remaining > 0)
            {
                chunkOrder++;
                IFSEntry f     = bigFile.Clone();
                BChunk   chunk = new BChunk(this.TaskId);
                chunk.Order       = chunkOrder;
                chunk.RootDriveId = this.backupRootDrive.ID;
                f.ChunkStartPos   = 0;
                chunk.Add(f);
                chunks.Add(chunk);
                f.FileStartPos = pos;
                pos      += Math.Min(remaining, maxChunkSize);
                remaining = bigFile.FileSize - pos;

                Logger.Append(Severity.TRIVIA, "GetNextChunk() : split file " + f.SnapFullPath + " (size " + f.FileSize + "), remaining=" + remaining + ", to chunk " + chunk.Name + " starting @ offset " + f.FileStartPos);
            }
            return(chunks);
        }
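For reference, the pos/remaining bookkeeping above reduces to a plain offset/length computation. A standalone sketch (hypothetical ChunkMath helper, not part of the original class) that mirrors it:

using System;
using System.Collections.Generic;

static class ChunkMath
{
    // Mirrors the pos/remaining bookkeeping in GetBigFileChunks():
    // yields (offset, length) pairs covering a file of fileSize bytes,
    // each length capped at maxChunkSize.
    public static IEnumerable<(long Offset, long Length)> Split(long fileSize, long maxChunkSize)
    {
        long pos = 0;
        while (pos < fileSize)
        {
            long len = Math.Min(fileSize - pos, maxChunkSize);
            yield return (pos, len);
            pos += len;
        }
    }

    static void Main()
    {
        // A 10 GiB file with 4 GiB chunks -> offsets 0, 4 GiB, 8 GiB.
        foreach (var (off, len) in Split(10L << 30, 4L << 30))
            Console.WriteLine("offset=" + off + " length=" + len);
    }
}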
Example #2
        private BChunk BuildIndexChunk()
        {
            backup.AddHubNotificationEvent(704, "", "");
            BChunk iChunk = new BChunk(backup.Index.FullName, backup.Index.FullName, backup.TaskId);

            try{
                iChunk.Add(new MinimalFsItem(backup.Index.FullName));
                if (backup.DataFlags.HasFlag(DataProcessingFlags.CDedup))                // backup the deduplication database
                {
                    iChunk.Add(ItemProvider.GetProvider().GetItemByPath(DedupIndex.Instance().IndexDBName));
                }

                iChunk.Sum = IndexManager.CheckSumIndex(backup.TaskId, (backup.Level != BackupLevel.Full));

                // register for session received, to process index transfer
                User.StorageSessionReceivedEvent += new User.StorageSessionReceivedHandler(this.SendIndex);
                User.AskIndexDest(backup.TaskId, backup.Index.Name, iChunk.Sum);
                Logger.Append(Severity.DEBUG, "Asked index destination to hub");
                return(iChunk);
            }
            catch (Exception e) {
                Logger.Append(Severity.ERROR, "Couldn't checksum index and/or ask destination to hub: " + e.Message + "---" + e.StackTrace);
                backup.AddHubNotificationEvent(808, e.Message, "");
            }
            return(null);
        }
Example #3
 private void DoIndex()
 {
     while (!indexerChunksFeed.IsCompleted && !cancellationTokenSource.IsCancellationRequested)
     {
         try{
             BChunk toBeIndexed = indexerChunksFeed.Take(cancellationTokenSource.Token);
             backup.Index.AddChunk(toBeIndexed);
             Logger.Append(Severity.DEBUG2, "Added chunk " + toBeIndexed.Name + " to index");
         }
         catch (Exception e) {
             if (e is OperationCanceledException)
             {
                 Logger.Append(Severity.DEBUG2, "Indexer has been manually cancelled on purpose, stopping...");
             }
             else if (e is InvalidOperationException)
             {
                 Logger.Append(Severity.DEBUG2, "Indexer : no more chunks to index");
             }
             else
             {
                 Console.WriteLine("////// unexpected DoIndex exception : " + e.ToString());
                 throw;
             }
             return;
         }
     }
 }
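DoIndex() above leans on the standard BlockingCollection<T> contract: Take(token) throws OperationCanceledException when the token fires, and InvalidOperationException once the collection has been marked complete and drained, which is exactly what the two catch branches distinguish. A minimal self-contained sketch of that contract (generic names, not from this codebase):

using System;
using System.Collections.Concurrent;
using System.Threading;
using System.Threading.Tasks;

class FeedDemo
{
    static void Main()
    {
        var feed = new BlockingCollection<int>();
        using var cts = new CancellationTokenSource();

        Task consumer = Task.Run(() =>
        {
            while (!feed.IsCompleted && !cts.IsCancellationRequested)
            {
                try { Console.WriteLine("took " + feed.Take(cts.Token)); }
                catch (OperationCanceledException) { return; }  // token fired: manual cancellation
                catch (InvalidOperationException) { return; }   // feed completed and drained
            }
        });

        for (int i = 0; i < 3; i++) feed.Add(i);
        feed.CompleteAdding();   // consumer drains the remaining items, then exits
        consumer.Wait();
    }
}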
Example #4
        private BChunk BuildIndexChunk()
        {
            backup.AddHubNotificationEvent(704, "", "");
            BChunk iChunk = null;

            string synthIndexFullPath = null;

            if (backup.Level != P2PBackup.Common.BackupLevel.Full && backup.Level != P2PBackup.Common.BackupLevel.SnapshotOnly)
            {
                IndexManager idxManager = new IndexManager();
                Logger.Append(Severity.INFO, "Building synthetic full index...");
                synthIndexFullPath = idxManager.CreateSyntheticFullIndex(backup.ParentTrackingId, backup.Id, backup.RootDrives);
                backup.AddHubNotificationEvent(707, "", "");
                backup.SyntheticIndexSum = IndexManager.CheckSumIndex(backup.Id, false); // for synthetic backups
            }
            if (backup.Level == BackupLevel.Refresh)                  // backup the synthetic index
            {
                iChunk = new BChunk(backup.Id);
                iChunk.Add(new MinimalFsItem(synthIndexFullPath));    // MinimalFsItem because we only care about the data, not the metadata
            }
            else                                                      // only backup the partial index
            {
                iChunk = new BChunk(backup.Id);
                iChunk.Add(new MinimalFsItem(backup.Index.FullName)); // MinimalFsItem because we only care about the data, not the metadata
            }

            try{
                if (backup.Level != BackupLevel.SnapshotOnly && backup.BackupSet.DataFlags.HasFlag(DataProcessingFlags.CDedup))                 // backup the deduplication database
                {
                    iChunk.Add(ItemProvider.GetProvider().GetItemByPath(dedupIndex.IndexDBName));
                    backup.DdbSum = dedupIndex.ChecksumDdb();
                }

                iChunk.Sum = IndexManager.CheckSumIndex(backup.Id, (backup.Level == BackupLevel.Refresh));
                if (backup.Level == BackupLevel.Full)
                {
                    backup.SyntheticIndexSum = iChunk.Sum;                     // for Fulls
                }
                // stop waiting for sessions used for regular data transfer...
                //User.SessionReady -= this.SessionReceived;
                // ...but re-register for session that will transfer the index
                //User.SessionReady += this.SendIndex;
                StorageNeeded(new PeerSession {
                    TaskId = backup.Id
                }, 1, true, false);
                Logger.Append(Severity.DEBUG, "Asked index destination to hub");
                return(iChunk);
            }
            catch (Exception e) {
                Logger.Append(Severity.ERROR, "Couldn't checksum index and/or ask destination to hub: " + e.Message + "---" + e.StackTrace);
                backup.AddHubNotificationEvent(808, e.Message, "");
            }
            return(null);
        }
Example #5
        /// <summary>
        /// One 'Produce' task generates chunks for one BackupRootDrive (i.e. one mountpoint/filesystem).
        /// </summary>
        /// <param name='queue'>
        /// the queue of BackupRootDrives to scan for items
        /// </param>
        private void Produce(Queue <BackupRootDrive> queue)
        {
            BlockingCollection <BChunk> myFeed = new BlockingCollection <BChunk>(new ConcurrentQueue <BChunk>(), 1);

            Console.WriteLine("    ------- Producer() has " + queue.Count + " drive items in its queue");
            chunksFeeds.Add(myFeed);
            //IEnumerator<BChunk> chunkEnumerator = bdh.GetNextChunk().GetEnumerator();//backup.GetNextChunk().GetEnumerator();
            while (queue.Count > 0)
            {
                BackupRootDrive bdr = queue.Dequeue();
                Logger.Append(Severity.INFO, "Collecting items to backup for drive " + bdr.SystemDrive.MountPoint);
                BackupRootDriveHandler bdh = new BackupRootDriveHandler(bdr, this.backup.Id, backup.BackupSet.MaxChunkSize, backup.BackupSet.MaxChunkSize, backup.BackupSet.MaxChunkFiles, backup.Level, backup.RefStartDate, backup.RefEndDate, backup.ParentTrackingId);
                bdh.LogEvent           += LogReceived;
                bdh.SubCompletionEvent += new BackupRootDriveHandler.SubCompletionHandler(IncrementSubCompletion);
                foreach (P2PBackup.Common.BasePath baseP in bdr.Paths)
                {
                    bdh.SetCurrentPath(baseP);
                    IEnumerator <BChunk> chunkEnumerator = bdh.GetNextChunk().GetEnumerator();
                    while (chunkEnumerator.MoveNext() && !cancellationTokenSource.IsCancellationRequested)
                    {
                        BChunk chunk = chunkEnumerator.Current;
                        try{
                            myFeed.Add(chunk, cancellationTokenSource.Token);
                        }
                        catch (OperationCanceledException) {
                            Logger.Append(Severity.TRIVIA, "Producer has been manually cancelled on purpose, stopping...");
                            return;
                        }
                        catch (Exception e) {
                            Logger.Append(Severity.ERROR, "###################### Produce()	: add refused : " + e.Message + " ---- " + e.StackTrace);
                            return;
                        }
                        // stats
                        foreach (IFSEntry item in chunk.Items)
                        {
                            backup.ItemsByType[(int)item.Kind]++;
                        }
                        Logger.Append(Severity.DEBUG, "Basepath " + baseP.Path + " : Added chunk " + chunk.Name + " containing " + chunk.Items.Count + " items ");
                    }
                }
                bdh.SubCompletionEvent -= new BackupRootDriveHandler.SubCompletionHandler(IncrementSubCompletion);
                bdh.LogEvent           -= LogReceived;
                if (!cancellationTokenSource.IsCancellationRequested)
                {
                    ContinueProducing();
                }
                else
                {
                    bdh.Dispose();
                }
            }
            Console.WriteLine("------------------------- PRODUCE(): done collecting ALL, complete feed adding, cancellationTokenSource.IsCancellationRequested=" + cancellationTokenSource.IsCancellationRequested);
            myFeed.CompleteAdding();
        }
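Note that myFeed above is created with a capacity bound of 1, so each producer blocks in Add() until a consumer has taken the previous chunk; scanning is throttled to transfer speed. A tiny sketch of that back-pressure behavior (generic element type, not from this codebase):

using System;
using System.Collections.Concurrent;
using System.Threading.Tasks;

class BoundedFeedDemo
{
    static void Main()
    {
        // Capacity 1: the second Add() blocks until Take() frees the slot.
        var feed = new BlockingCollection<string>(new ConcurrentQueue<string>(), 1);

        Task producer = Task.Run(() =>
        {
            feed.Add("chunk-1");
            feed.Add("chunk-2");   // blocks here until the consumer takes chunk-1
            feed.CompleteAdding();
        });

        foreach (string item in feed.GetConsumingEnumerable())
            Console.WriteLine("consumed " + item);

        producer.Wait();
    }
}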
Example #6
 /// <summary>
 /// If the received file is a recovery file, SendRecoveryToHub is called.
 /// </summary>
 /// <param name="received">true if the file has been received</param>
 /// <param name="chunk">the chunk that has been received</param>
 private void FileReceived(bool received, BChunk chunk)
 {
     /*if (received == true){
      *      foreach (BChunk bc in backup.Chunks){
      *              if(bc.Name == chunk.Name){
      *                      bc.Fetched = true;
      *                      UpdateGUIEvent("prbRecovery", bc.Size.ToString());
      *                      SendRecoveryToHub();
      *              }
      *      }
      * }*/
 }
Example #7
 private void RemoveChunk(BChunk chunkToRemove)
 {
     lock (processingChunks){
         for (int i = processingChunks.Count - 1; i >= 0; i--)
         {
             if (processingChunks[i].Name == chunkToRemove.Name)
             {
                 processingChunks.RemoveAt(i);
             }
         }
     }
 }
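If processingChunks is a List<BChunk> (which the indexed RemoveAt loop suggests, though the snippet doesn't show the declaration), the whole method collapses to List<T>.RemoveAll; a sketch under that assumption:

 private void RemoveChunk(BChunk chunkToRemove)
 {
     lock (processingChunks){
         // removes every entry whose Name matches, same as the reverse-index loop
         processingChunks.RemoveAll(c => c.Name == chunkToRemove.Name);
     }
 }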
Example #8
        /// <summary>
        /// One 'Produce' task generates chunks for one BackupRootDrive (i.e. one mountpoint).
        /// </summary>
        /// <param name='bdr'>
        /// the BackupRootDrive to scan for items
        /// </param>
        private void Produce(BackupRootDrive bdr)
        {
            Logger.Append(Severity.INFO, "Collecting items to backup for drive " + bdr.SystemDrive.MountPoint);
            BackupRootDriveHandler bdh = new BackupRootDriveHandler(bdr, this.backup.TaskId, backup.MaxChunkSize, backup.MaxChunkSize, backup.MaxChunkFiles, backup.Level, backup.RefStartDate, backup.RefEndDate, backup.RefTaskId);

            bdh.LogEvent           += LogReceived;
            bdh.SubCompletionEvent += new BackupRootDriveHandler.SubCompletionHandler(IncrementSubCompletion);


            foreach (P2PBackup.Common.BasePath baseP in bdr.Paths)
            {
                bdh.SetCurrentPath(baseP);
                IEnumerator <BChunk> chunkEnumerator = bdh.GetNextChunk().GetEnumerator();
                while (chunkEnumerator.MoveNext() && !cancellationTokenSource.IsCancellationRequested)
                {
                    BChunk chunk = chunkEnumerator.Current;
                    try{
                        chunkBuilderFeed.Add(chunk, cancellationTokenSource.Token);
                    }
                    catch (OperationCanceledException) {
                        Logger.Append(Severity.DEBUG2, "Producer has been manually cancelled on purpose, stopping...");
                        return;
                    }
                    catch (Exception e) {
                        Logger.Append(Severity.ERROR, "###################### Produce()	: add refused : " + e.Message + " ---- " + e.StackTrace);
                        return;
                    }
                    // stats
                    foreach (IFSEntry item in chunk.Files)
                    {
                        backup.ItemsByType[(int)item.Kind]++;
                    }
                    Logger.Append(Severity.DEBUG, "Added chunk " + chunk.Name + " containing " + chunk.Files.Count + " items ");
                }
            }
            bdh.SubCompletionEvent -= new BackupRootDriveHandler.SubCompletionHandler(IncrementSubCompletion);
            bdh.LogEvent           -= LogReceived;
            Logger.Append(Severity.INFO, "Producer has done collecting items to backup for drive " + bdr.SystemDrive.MountPoint);
            if (!cancellationTokenSource.IsCancellationRequested)
            {
                ContinueProducing();
            }
            else
            {
                bdh.Dispose();
            }
        }
Example #9
        private void ProcessIndex()
        {
            if (backup.DataFlags.HasFlag(DataProcessingFlags.CDedup))
            {
                // save dedup and process index even if task is cancelled (for cleaning purposes)
                try{
                    DedupIndex.Instance().Persist();
                }
                catch (Exception _e) {
                    Logger.Append(Severity.ERROR, "Could not save deduplication indexes DB, backup data is therefore invalid. TODO: Report!!! : " + _e.Message + " ---- " + _e.StackTrace);
                    backup.AddHubNotificationEvent(809, DedupIndex.Instance().IndexDBName, _e.Message);
                }
            }
            // now we have to send backup index and dedup index
            backup.Index.Terminate();
            indexChunk = BuildIndexChunk();
        }
Example #10
 private void ProcessIndex()
 {
      if (backup.Level != BackupLevel.SnapshotOnly && backup.BackupSet.DataFlags.HasFlag(DataProcessingFlags.CDedup))
      {
         // save dedup and process index even if task is cancelled (for cleaning purposes)
         try{
             dedupIndex.Commit();
             dedupIndex.Dispose();
         }
         catch (Exception _e) {
             Logger.Append(Severity.ERROR, "Could not save deduplication indexes DB, backup data is therefore invalid. TODO: Report!!! : " + _e.Message + " ---- " + _e.StackTrace);
             backup.AddHubNotificationEvent(809, dedupIndex.IndexDBName, _e.Message);
         }
     }
     // now we have to send backup index and dedup index
     backup.Index.Terminate();
     indexChunk       = BuildIndexChunk();
     backup.IndexName = indexChunk.Name;
     backup.IndexSum  = indexChunk.Sum;
     backup.IndexSize = indexChunk.OriginalSize;
 }
Example #11
        private void Consume(Session s)
        {
            // Filter client-side processing flags
            DataProcessingFlags clientFlags = DataProcessingFlags.None;

            foreach (DataProcessingFlags flag in Enum.GetValues(typeof(DataProcessingFlags)))
            {
                if ((int)flag < 512 && backup.DataFlags.HasFlag(flag))
                {
                    clientFlags |= flag;
                }
            }

            DataPipeline pipeline = new DataPipeline(PipelineMode.Write, clientFlags, this.backup.Bs.Id);

            if (backup.DataFlags.HasFlag(DataProcessingFlags.CDedup))
            {
                pipeline.StorageNode = s.ClientId;
            }
            if (backup.DataFlags.HasFlag(DataProcessingFlags.CEncrypt))
            {
                //X509Certificate2 cert = new X509Certificate2(ConfigManager.GetValue("Security.CertificateFile"), "");
                //pipeline.CryptoKey = (RSACryptoServiceProvider)cert.PublicKey.Key;
                pipeline.CryptoKey = s.CryptoKey;

                byte[] iv = new byte[16];
                Array.Copy(System.BitConverter.GetBytes(backup.TaskId), iv, 8);
                Array.Copy(System.BitConverter.GetBytes(backup.TaskId), 0, iv, 8, 8);
                pipeline.IV = iv;
            }
            ChunkProcessor cp = new ChunkProcessor(s, pipeline, backup);

            s.TransfertDoneEvent += new Session.TransfertDoneHandler(ManageChunkSent);

            // We transfer chunks until the budget is exhausted or there are no more chunks to send (backup done, or severe error)
            while ((!chunkBuilderFeed.IsCompleted) && (s.Budget > 0))
            {
                if (cancellationTokenSource.IsCancellationRequested)
                {
                    s.LoggerInstance.Log(Severity.INFO, "Received cancellation request for task #" + backup.TaskId + ", stop processing...");
                    s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
                    s.SendDisconnect();
                    s.Disconnect();
                    return;
                }
                BChunk chunk = null;
                try{
                    lock (processingChunks){
                        chunk = chunkBuilderFeed.Take(cancellationTokenSource.Token);
                        s.LoggerInstance.Log(Severity.DEBUG2, "Processing chunk " + chunk.Name);
                        processingChunks.Add(chunk);
                    }
                    cp.Process(chunk, backup.MaxChunkSize);

                    // wait (cancellation-aware) for the storage node to confirm the chunk transfer
                    chunk.SentEvent.Wait(cancellationTokenSource.Token);
                    s.LoggerInstance.Log(Severity.DEBUG2, "Processed chunk " + chunk.Name + ", remaining budget=" + s.Budget);
                    chunk.SentEvent.Dispose();

                }
                catch (System.Net.Sockets.SocketException e) {
                    // error sending to storage node: re-add the chunk and ask the hub for another storage session.
                    if (chunk == null)
                    {
                        return;
                    }
                    Logger.Append(Severity.ERROR, "Consume() : send failed for chunk " + chunk.Name + ": " + e.Message + " ---- " + e.StackTrace);
                    backup.AddHubNotificationEvent(811, chunk.Name, e.Message);
                    RemoveChunk(chunk);
                    //s.Disconnect();
                    try{
                        User.AskAlternateDestination(backup.TaskId, s.ClientId);
                        chunkBuilderFeed.Add(chunk, cancellationTokenSource.Token);
                    }
                    catch (InvalidOperationException ioe) {
                        Logger.Append(Severity.ERROR, "A session error occured, unable to use a new session to process chunk (queue is closed) : " + ioe.Message);
                        backup.AddHubNotificationEvent(811, chunk.Name, "A session error occured, unable to use a new session to process chunk (queue is closed)");
                    }
                    //throw new Exception("Something went wrong with this consumer");
                }
                catch (OperationCanceledException) {
                    Logger.Append(Severity.DEBUG2, "Consumer task has been manually cancelled on purpose, stopping...");
                    s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
                    return;
                }

            }
            s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
            Logger.Append(Severity.DEBUG, "Session with node #" + s.ClientId + ": processed and transferred all data chunks, unused budget=" + s.Budget);
        }
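The IV built in Consume() is just the 8-byte task id copied into both halves of a 16-byte buffer. An isolated sketch of that derivation (assuming TaskId is a long, as BitConverter.GetBytes' 8-byte output implies):

using System;

static class IvDemo
{
    // Builds the 16-byte IV the way Consume() does: the 8-byte
    // little-endian task id repeated into both halves of the buffer.
    static byte[] BuildIv(long taskId)
    {
        byte[] iv = new byte[16];
        byte[] id = BitConverter.GetBytes(taskId);   // 8 bytes for a long
        Array.Copy(id, 0, iv, 0, 8);
        Array.Copy(id, 0, iv, 8, 8);
        return iv;
    }

    static void Main()
    {
        Console.WriteLine(BitConverter.ToString(BuildIv(42)));
    }
}

Since the IV is derived deterministically from the task id it is predictable; whether that matters depends on the cipher mode the pipeline uses, which these snippets don't show.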
Example #12
        internal IEnumerable <BChunk> GetNextChunk()
        {
            chunkOrder++;
            BChunk chunk = new BChunk(this.TaskId);

            chunk.Order       = chunkOrder;
            chunk.RootDriveId = this.backupRootDrive.ID;
            uint filePosInChunk = 0;
            long currentSize    = 0;

            while (itemIterator.MoveNext())
            {
                IFSEntry ent = itemIterator.Current;
                if (ent == null)
                {
                    continue;
                }


                // 1st case : we can add more files to the chunk
                if (ent.FileSize < maxChunkSize)
                {
                    ent.ChunkStartPos = filePosInChunk;                     // offset of this file inside the chunk
                    ent.FileStartPos  = 0;
                    filePosInChunk   += (uint)ent.FileSize;
                    chunk.Add(ent);
                    currentSize      += ent.FileSize;
                }
                //2nd case : a file is too big to fit into one chunk, split it
                else
                {
                    if (chunk.Items.Count > 0)
                    {
                        yield return(chunk);
                    }

                    foreach (BChunk bigFileChunk in GetBigFileChunks(itemIterator.Current, filePosInChunk))
                    {
                        yield return(bigFileChunk);
                    }
                    chunkOrder++;
                    chunk             = new BChunk(this.TaskId);
                    chunk.Order       = chunkOrder;
                    chunk.RootDriveId = this.backupRootDrive.ID;
                    filePosInChunk    = 0;
                    currentSize       = 0;
                }
                // 3rd case : if a chunk reaches its max packSize, we create another one
                if (currentSize > maxChunkSize || (chunk.Items.Count > 0 && currentSize > maxPackSize)
                    || chunk.Items.Count > maxChunkFiles)
                {
                    yield return(chunk);

                    chunkOrder++;
                    chunk             = new BChunk(this.TaskId);
                    chunk.Order       = chunkOrder;
                    chunk.RootDriveId = this.backupRootDrive.ID;
                    filePosInChunk    = 0;
                    currentSize       = 0;
                }
            }
            // 4th case : done processing the file list, but the last chunk hasn't reached its max size
            Logger.Append(Severity.TRIVIA, "GetNextChunk() : Done gathering files inside '" + snapshottedPath + "' without reaching chunk max size. " + chunk.Items.Count + " files, " + currentSize / 1024 + "k");
            yield return(chunk);
        }
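GetNextChunk() is a lazy iterator: its state (chunkOrder, the current chunk, running sizes) persists across yield returns, and the caller pulls one chunk at a time, as Produce() does through GetEnumerator()/MoveNext(). A simplified, self-contained stand-in for the batching logic (hypothetical names; plain sizes stand in for IFSEntry items):

using System;
using System.Collections.Generic;

static class ChunkIteratorDemo
{
    // Groups item sizes into batches, yielding a batch once it exceeds
    // maxChunkSize, plus a trailing (possibly partial) batch at the end,
    // mirroring the 3rd and 4th cases of GetNextChunk().
    static IEnumerable<List<long>> Chunks(IEnumerable<long> sizes, long maxChunkSize)
    {
        var batch = new List<long>();
        long currentSize = 0;
        foreach (long size in sizes)
        {
            batch.Add(size);
            currentSize += size;
            if (currentSize > maxChunkSize)
            {
                yield return batch;
                batch = new List<long>();
                currentSize = 0;
            }
        }
        yield return batch;   // the last chunk, even if it never reached max size
    }

    static void Main()
    {
        foreach (var b in Chunks(new long[] { 10, 20, 70, 5 }, 50))
            Console.WriteLine(string.Join(",", b));   // prints "10,20,70" then "5"
    }
}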
Example #13
        // TODO : split this into a dedicated consumer class, and merge ChunkProcessor into that future class.
        // Using a class will allow tracking what the consumer is doing, and cancelling it if it blocks on an empty chunksFeeds.Take() (when everything has already been processed).
        private void Consume(Session s)
        {
            if (megaQueue.Count == 0 && doneBdhProducers == 0 && chunksFeeds.Count == 0)
            {
                Logger.Append(Severity.TRIVIA, "Consume() : producers' queues are marked complete and already processed, exiting");
                return;
            }

            // Filter client-side processing flags
            DataProcessingFlags clientFlags = DataProcessingFlags.None;

            foreach (DataProcessingFlags flag in Enum.GetValues(typeof(DataProcessingFlags)))
            {
                if ((int)flag < 512 && backup.BackupSet.DataFlags.HasFlag(flag))
                {
                    clientFlags |= flag;
                }
            }

            DataPipeline pipeline = new DataPipeline(PipelineMode.Write, clientFlags, this.backup.BackupSet.Id, dedupIndex);

            if (backup.BackupSet.DataFlags.HasFlag(DataProcessingFlags.CDedup))
            {
                pipeline.StorageNode = s.ToNode.Id;
            }
            if (backup.BackupSet.DataFlags.HasFlag(DataProcessingFlags.CEncrypt))
            {
                //X509Certificate2 cert = new X509Certificate2(ConfigManager.GetValue("Security.CertificateFile"), "");
                //pipeline.CryptoKey = (RSACryptoServiceProvider)cert.PublicKey.Key;
                pipeline.CryptoKey = s.CryptoKey;

                byte[] iv = new byte[16];
                Array.Copy(System.BitConverter.GetBytes(backup.Id), iv, 8);
                Array.Copy(System.BitConverter.GetBytes(backup.Id), 0, iv, 8, 8);
                pipeline.IV = iv;
            }
            if (chunksFeeds.Count == 0)
            {
                return;
            }
            ChunkProcessor cp = new ChunkProcessor(s, pipeline, backup, this.cancellationTokenSource.Token);

            s.TransfertDoneEvent += new Session.TransfertDoneHandler(ManageChunkSent);
            Console.WriteLine("------------------------- Consume(" + s.Id + ") : taking chunks feed from queue which has " + chunksFeeds.Count + " elements yet to processe");
            BlockingCollection <BChunk> myFeed = chunksFeeds.Take(this.cancellationTokenSource.Token);

            Console.WriteLine("------------------------- Consume(" + s.Id + ") : got a new queue to process, yummy!");

            // We transfer chunks until the budget is exhausted or there are no more chunks to send (backup done, or severe error)
            while ((!myFeed.IsCompleted) && (s.Budget > 0))
            {
                if (cancellationTokenSource.IsCancellationRequested)
                {
                    s.LoggerInstance.Log(Severity.INFO, "Received cancellation request for task #" + backup.Id + ", stop processing...");
                    s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
                    s.SendDisconnect();
                    s.Disconnect();
                    return;
                }
                BChunk chunk = null;
                try{
                    chunk = myFeed.Take(cancellationTokenSource.Token);

                    s.LoggerInstance.Log(Severity.TRIVIA, "Processing chunk " + chunk.Name);
                    lock (processingChunks){
                        processingChunks.Add(chunk);
                    }
                    cp.Process(chunk, backup.BackupSet.MaxChunkSize);

                    if (chunk.SentEvent.Wait(180000, cancellationTokenSource.Token))
                    {
                        s.LoggerInstance.Log(Severity.TRIVIA, "Processed  chunk " + chunk.Name + ", remaining budget=" + s.Budget);
                    }
                    else                      // timeout waiting for storage node confirmation
                    {
                        Logger.Append(Severity.WARNING, "Timeout (3 minutes) waiting for storage node #" + s.ToNode.Id + " (" + s.ToNode.IP + ") confirmation, chunk " + chunk.Name);
                        backup.AddHubNotificationEvent(811, chunk.Name, "Timeout waiting for storage node #" + s.ToNode.Id + " transfer confirmation");
                        // TODO : but if have an error with one chunk, it's likely we will have one with next chunks too.
                        //		close session now instead of continuing???
                        try{
                            myFeed.Add(chunk, cancellationTokenSource.Token);
                        }
                        catch (InvalidOperationException) {
                            Logger.Append(Severity.ERROR, "Timeout waiting for storage node #" + s.ToNode.Id + " : A session error occured, unable to use a new session to process chunk (queue is closed)");
                            backup.AddHubNotificationEvent(811, chunk.Name, "Timeout waiting for storage node #" + s.ToNode.Id + " : A session error occured, unable to use a new session to process chunk (queue is closed)");
                        }
                    }
                    if (chunk.SentEvent != null)
                    {
                        chunk.SentEvent.Dispose();
                    }
                }
                catch (System.Net.Sockets.SocketException e) {
                    // error sending to storage node: re-add the chunk and ask the hub for an alternate storage session.
                    if (chunk == null)
                    {
                        return;
                    }
                    Logger.Append(Severity.ERROR, "Consume() : send failed for chunk " + chunk.Name + ": " + e.Message + " ---- " + e.StackTrace);
                    backup.AddHubNotificationEvent(811, chunk.Name, e.Message);
                    RemoveChunk(chunk);
                    try{
                        this.StorageNeeded(s, 1, false, true);
                        myFeed.Add(chunk, cancellationTokenSource.Token);
                    }
                    catch (InvalidOperationException ioe) {
                        Logger.Append(Severity.ERROR, "A session error occured, unable to use a new session to process chunk (queue is closed) : " + ioe.Message);
                        backup.AddHubNotificationEvent(811, chunk.Name, "A session error occured, unable to use a new session to process chunk (queue is closed)");
                    }
                }
                catch (OperationCanceledException) {
                    Logger.Append(Severity.TRIVIA, "Consumer task has been manually cancelled on purpose, stopping...");
                    s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
                    return;
                }
                catch (InvalidOperationException) {
                    Logger.Append(Severity.DEBUG, "Consumer task has done processing its chunks list");
                }
            }
            if (!myFeed.IsCompleted)
            {
                // re-add this non-terminated chunk list to the queue, to be processed by the next session.
                Logger.Append(Severity.TRIVIA, "Consume() : feed not completed, re-adding it to chunksFeeds (count=" + chunksFeeds.Count + ")");
                chunksFeeds.Add(myFeed, cancellationTokenSource.Token);
            }
            s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
            Logger.Append(Severity.DEBUG, "Session #" + s.Id + " with node #" + s.ToNode.Id + ": processed and transferred all data chunks, unused budget=" + s.Budget);
        }