Example #1
 internal DataPipeline(PipelineMode mode, DataProcessingFlags flags, int bsId, DedupIndex ddidx) : this(mode, flags)
 {
     this.bsId = bsId;
     ddb       = ddidx;
 }
Example #2
 internal DataPipeline(PipelineMode mode, DataProcessingFlags flags)
 {
     this.Flags = flags;
     this.Mode  = mode;
 }
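Note how the two constructors chain: the four-argument overload in Example #1 delegates to this two-argument one via : this(mode, flags) before storing the backup-set id and dedup index. A minimal caller sketch, based only on the calls visible in these examples (the variable names backupSetId and sessionKey are hypothetical placeholders):

 // Hypothetical wiring of a write pipeline, for illustration only.
 DataProcessingFlags flags = DataProcessingFlags.CCompress | DataProcessingFlags.CEncrypt;
 DataPipeline pipeline = new DataPipeline(PipelineMode.Write, flags, backupSetId, DedupIndex.Instance());
 pipeline.CryptoKey = sessionKey; // required when CEncrypt is set (see Example #3)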
Example #3
        private void Consume(Session s /*, int budget*/)
        {
            // Filter client-side processing flags
            DataProcessingFlags clientFlags = DataProcessingFlags.None;

            foreach (DataProcessingFlags flag in Enum.GetValues(typeof(DataProcessingFlags)))
            {
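                // enum values below 512 are the client-side flags; higher values are presumably server-side and filtered out here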
                if ((int)flag < 512 && backup.DataFlags.HasFlag(flag))
                {
                    clientFlags |= flag;
                }
            }

            DataPipeline pipeline = new DataPipeline(PipelineMode.Write, clientFlags, this.backup.Bs.Id);

            if (backup.DataFlags.HasFlag(DataProcessingFlags.CDedup))
            {
                pipeline.StorageNode = s.ClientId;
            }
            if (backup.DataFlags.HasFlag(DataProcessingFlags.CEncrypt))
            {
                //X509Certificate2 cert = new X509Certificate2(ConfigManager.GetValue("Security.CertificateFile"), "");
                //pipeline.CryptoKey = (RSACryptoServiceProvider)cert.PublicKey.Key;
                pipeline.CryptoKey = s.CryptoKey;

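                // Build a deterministic 16-byte IV by writing the 8-byte (64-bit) task id twice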
                byte[] iv = new byte[16];
                Array.Copy(System.BitConverter.GetBytes(backup.TaskId), iv, 8);
                Array.Copy(System.BitConverter.GetBytes(backup.TaskId), 0, iv, 8, 8);
                pipeline.IV = iv;                 //new byte[]{Convert.ToByte(backup.TaskId)};
            }
            //pipeline.Init();
            ChunkProcessor cp = new ChunkProcessor(s, pipeline, backup);

            s.TransfertDoneEvent += new Session.TransfertDoneHandler(ManageChunkSent);

            // We transfer chunks until the budget is exhausted or there are no more chunks to send (backup done, or severe error)
            while ((!chunkBuilderFeed.IsCompleted) && (s.Budget > 0))
            {
                if (cancellationTokenSource.IsCancellationRequested)
                {
                    s.LoggerInstance.Log(Severity.INFO, "Received cancellation request for task #" + backup.TaskId + ", stop processing...");
                    s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
                    s.SendDisconnect();
                    s.Disconnect();
                    return;
                }
                BChunk chunk = null;
                try{
                    lock (processingChunks){
                        chunk = chunkBuilderFeed.Take(cancellationTokenSource.Token);
                        s.LoggerInstance.Log(Severity.DEBUG2, "Processing chunk " + chunk.Name);
                        processingChunks.Add(chunk);
                    }
                    cp.Process(chunk, backup.MaxChunkSize);

                    /*backup.OriginalSize += chunk.OriginalSize;
                     * backup.FinalSize += chunk.Size;
                     * backup.TotalChunks ++;
                     * backup.TotalItems += chunk.Files.Count;*/
                    //if(chunk.Size > pipeline.HeaderLength)// an empty chunk doesn't count
                    //		budget--;

                    // TODO: replace WaitOne with a CancellationToken-aware implementation: http://msdn.microsoft.com/en-us/library/ee191552.aspx
                    //if (chunk.SentEvent.WaitOne()){ // (60000, false)){
                    chunk.SentEvent.Wait(cancellationTokenSource.Token);
                    s.LoggerInstance.Log(Severity.DEBUG2, "Processed  chunk " + chunk.Name + ", remaining budget=" + s.Budget);
                    chunk.SentEvent.Dispose();

                    /*}
                     * else{ // timeout waiting for storage node confirmation
                     *      Logger.Append(Severity.WARNING, "Timeout waiting for storage node #"+s.ClientId+" ("+s.ClientIp+") confirmation, chunk "+chunk.Name);
                     *      // TODO : but if have an error with one chunk, it's likely we will have one with next chunks too.
                     *      //		close session now instead of continuing???
                     *      try{
                     *              chunkBuilderFeed.Add(chunk, cancellationTokenSource.Token);
                     *      }
                     *      catch(InvalidOperationException){
                     *              Logger.Append(Severity.ERROR, "Timeout waiting for storage node #"+s.ClientId+" : A session error occured, unable to use a new session to process chunk (queue is closed)");
                     *              backup.AddHubNotificationEvent(811, chunk.Name, "Timeout waiting for storage node #"+s.ClientId+" : A session error occured, unable to use a new session to process chunk (queue is closed)");
                     *      }
                     * }*/
                }
                catch (System.Net.Sockets.SocketException e) {
                    // Error sending to storage node: re-queue the chunk and ask the hub for another storage session.
                    if (chunk == null)
                    {
                        return;
                    }
                    Console.WriteLine("############## Consume()	: TAKE refused for chunk " + chunk.Name + ": " + e.Message + " ---- " + e.StackTrace);
                    backup.AddHubNotificationEvent(811, chunk.Name, e.Message);
                    RemoveChunk(chunk);
                    //s.Disconnect();
                    try{
                        User.AskAlternateDestination(backup.TaskId, s.ClientId);
                        chunkBuilderFeed.Add(chunk, cancellationTokenSource.Token);
                    }
                    catch (InvalidOperationException ioe) {
                        Logger.Append(Severity.ERROR, "A session error occured, unable to use a new session to process chunk (queue is closed) : " + ioe.Message);
                        backup.AddHubNotificationEvent(811, chunk.Name, "A session error occured, unable to use a new session to process chunk (queue is closed)");
                    }
                    //throw new Exception("Something went wrong with this consumer");
                }
                catch (OperationCanceledException) {
                    Logger.Append(Severity.DEBUG2, "Consumer task was cancelled, stopping...");
                    s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
                    return;
                }

                /*Logger.Append(Severity.INFO, "DataProcessorStreams statistics : checksum="+BenchmarkStats.Instance().ChecksumTime
                 +"ms, dedup="+BenchmarkStats.Instance().DedupTime
                 +"ms, compress="+BenchmarkStats.Instance().CompressTime
                 +"ms, send="+BenchmarkStats.Instance().SendTime+"ms.");*/
            }
            s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
            Logger.Append(Severity.DEBUG, "Session with node #" + s.ClientId + ": processed and transferred all data chunks, unused budget=" + s.Budget);
        }
Example #4
        internal DataPipeline_(PipelineMode mode, Session s, Backup b, DataProcessingFlags flags)
        {
            backup = b;

            storageSession = s;
            BinaryFormatter formatter = new BinaryFormatter();
            BChunkHeader    header    = new BChunkHeader();

            header.DataFlags = flags;

            header.Version = Utilities.PlatForm.Instance().NodeVersion;
            //header.TaskId = taskId;
            header.TaskId = b.TaskId;

            // end-of-chain stream
            sessionStream = new NetworkStream(storageSession.DataSocket);

            this.Flags = flags;
#if DEBUG
            if (ConfigManager.GetValue("BENCHMARK") != null)
            {
                sessionStream = new DummyStream();
            }
#endif
            if (flags.HasFlag(DataProcessingFlags.CChecksum))
            {
                finalStream = new NullSinkStream(new ChunkHasherStream(sessionStream), mode);
            }
            else
            {
                finalStream = new NullSinkStream(sessionStream, mode);                // dummy dest stream
            }
            //firstStream = new NullSinkStream(); // test and benchmarking

            // top-of-chain streams
            firstStream = finalStream;
            if (flags.HasFlag(DataProcessingFlags.CEncrypt))
            {
                EncryptorStream encStream = new EncryptorStream(firstStream, true, null);
                header.EncryptionMetaData = encStream.EncryptionMetadata;

                // TODO !! take encryptionMetadata and add it to index
                firstStream = encStream;
            }
            if (flags.HasFlag(DataProcessingFlags.CCompress))
            {
                //firstStream = new CompressorStream(firstStream, CompressorAlgorithm.Lzo, 1024);
                firstStream = new GZCompressorStream(firstStream, System.IO.Compression.CompressionMode.Compress);
            }
            if (flags.HasFlag(DataProcessingFlags.CDedup))
            {
                cdds = new ClientDeduplicatorStream(firstStream, s.ClientId);

                /*try{ // TODO ! remove cksum provider selection from here, find a more elegant solution
                 *      firstStream = new ChecksummerStream_MHash((ClientDeduplicatorStream)cdds);
                 * }
                 * catch(Exception e){*/
                firstStream = new ChecksummerStream((ClientDeduplicatorStream)cdds);
                //firstStream = new TigerTreeHasherStream((ClientDeduplicatorStream)cdds);

                /*}*/
                // Pre-Initialize dedup index (if needed)
                DedupIndex.Instance().Initialize();
            }
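            // Resulting write chain when all flags are set: data written to firstStream flows
            // checksummer -> dedup -> compressor -> encryptor -> finalStream (hasher/null sink) -> sessionStream (network)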
            MemoryStream headerStream = new MemoryStream();
            formatter.Serialize(headerStream, header);
            headerData = headerStream.ToArray();
            Logger.Append(Severity.INFO, "Created data pipeline with flags " + flags.ToString());
            //privilegesManager = new Utilities.PrivilegesManager();
        }
Example #5
        private void CreateStorageSessions(PeerNode askingNode, List<PeerNode> targetNodes, Task currentTask, int budget, DataProcessingFlags flags, bool isIndexStorage)
        {
            foreach (PeerNode chunkDestNode in targetNodes)
            {
                PeerSession targetSess = null;
                try{
                    int sessId = sessionsList.Count + 1;
                    targetSess = new PeerSession {
                        FromNode = askingNode,
                        ToNode   = chunkDestNode,
                        Id       = sessId,                   //sessionId,
                        Flags    = flags,                    //currentTask.BackupSet.DataFlags,
                        TaskId   = currentTask.Id,
                        Kind     = SessionType.Backup,
                        Secret   = currentTask.EncryptionKey
                    };
                    targetSess.RenewBudget(budget);
                    CreateStorageSession(targetSess, currentTask, isIndexStorage);

                    //if (SessionChanged != null && existingSession == null) SessionChanged(true, SessionType.Backup, targetSess.Id, this, chunkDestNode, currentTask.Id, budget);
                    // 3 - we add the storage node(s) to task
                    currentTask.AddStorageNode(chunkDestNode);
                }
                catch (IOException ioe) {
                    // change back destination's available space
                    //chunkDestNode.Available = chunkDestNode.Available + currentTask.BackupSet.MaxChunkSize*budget;
                    Utilities.Logger.Append("HUBRN", Severity.ERROR, "dest " + chunkDestNode.Name + " not available (" + ioe.Message + "), looking for an alternate one");
                    // try another node, recursive call
                    //ChooseStorage(askingNode.Id, s, 1, false, true);
                    ChooseStorage(askingNode.Id, new PeerSession {
                        TaskId = currentTask.Id, Id = -1
                    }, 1, false, true);
                }
                catch (Exception ex) {
                    Utilities.Logger.Append("HUBRN", Severity.ERROR, "dest " + chunkDestNode.Name + " : " + ex.Message);
                }
            }
        }
Example #6
        // TODO : split this into a dedicated consumer class, and merge ChunkProcessor into that future class.
        // Using a class will allow tracking what the consumer is doing, and cancelling it if it blocks on an empty chunksFeeds.Take (when everything has already been processed).
        private void Consume(Session s /*, int budget*/)
        {
            if (/*chunksFeeds.IsCompleted*/ megaQueue.Count == 0 && doneBdhProducers == 0 && chunksFeeds.Count == 0)
            {
                Console.WriteLine("------------------------- CONSUME() : procducers queues marked complete and already processed, exiting");
                return;
            }

            // Filter client-side processing flags
            DataProcessingFlags clientFlags = DataProcessingFlags.None;

            foreach (DataProcessingFlags flag in Enum.GetValues(typeof(DataProcessingFlags)))
            {
                if ((int)flag < 512 && backup.BackupSet.DataFlags.HasFlag(flag))
                {
                    clientFlags |= flag;
                }
            }

            DataPipeline pipeline = new DataPipeline(PipelineMode.Write, clientFlags, this.backup.BackupSet.Id, dedupIndex);

            if (backup.BackupSet.DataFlags.HasFlag(DataProcessingFlags.CDedup))
            {
                pipeline.StorageNode = s.ToNode.Id;
            }
            if (backup.BackupSet.DataFlags.HasFlag(DataProcessingFlags.CEncrypt))
            {
                //X509Certificate2 cert = new X509Certificate2(ConfigManager.GetValue("Security.CertificateFile"), "");
                //pipeline.CryptoKey = (RSACryptoServiceProvider)cert.PublicKey.Key;
                pipeline.CryptoKey = s.CryptoKey;

                byte[] iv = new byte[16];
                Array.Copy(System.BitConverter.GetBytes(backup.Id), iv, 8);
                Array.Copy(System.BitConverter.GetBytes(backup.Id), 0, iv, 8, 8);
                pipeline.IV = iv;                 //new byte[]{Convert.ToByte(backup.TaskId)};
            }
            if (chunksFeeds.Count == 0)
            {
                return;
            }
            ChunkProcessor cp = new ChunkProcessor(s, pipeline, backup, this.cancellationTokenSource.Token);

            s.TransfertDoneEvent += new Session.TransfertDoneHandler(ManageChunkSent);
            Console.WriteLine("------------------------- Consume(" + s.Id + ") : taking chunks feed from queue which has " + chunksFeeds.Count + " elements yet to processe");
            BlockingCollection <BChunk> myFeed = chunksFeeds.Take(this.cancellationTokenSource.Token);

            Console.WriteLine("------------------------- Consume(" + s.Id + ") : got a new queue to process, yummy!");

            // We transfer chunks until the budget is exhausted or there are no more chunks to send (backup done, or severe error)
            while ((!myFeed.IsCompleted) && (s.Budget > 0))
            {
                if (cancellationTokenSource.IsCancellationRequested)
                {
                    s.LoggerInstance.Log(Severity.INFO, "Received cancellation request for task #" + backup.Id + ", stop processing...");
                    s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
                    s.SendDisconnect();
                    s.Disconnect();
                    return;
                }
                BChunk chunk = null;
                try{
                    chunk = myFeed.Take(cancellationTokenSource.Token);

                    s.LoggerInstance.Log(Severity.TRIVIA, "Processing chunk " + chunk.Name);
                    lock (processingChunks){
                        processingChunks.Add(chunk);
                    }
                    cp.Process(chunk, backup.BackupSet.MaxChunkSize);

                    if (chunk.SentEvent.Wait(180000, cancellationTokenSource.Token))
                    {
                        s.LoggerInstance.Log(Severity.TRIVIA, "Processed  chunk " + chunk.Name + ", remaining budget=" + s.Budget);
                    }
                    else                      // timeout waiting for storage node confirmation
                    {
                        Logger.Append(Severity.WARNING, "Timeout (3 minutes) waiting for storage node #" + s.ToNode.Id + " (" + s.ToNode.IP + ") confirmation, chunk " + chunk.Name);
                        backup.AddHubNotificationEvent(811, chunk.Name, "Timeout waiting for storage node #" + s.ToNode.Id + " transfer confirmation");
                        // TODO : but if have an error with one chunk, it's likely we will have one with next chunks too.
                        //		close session now instead of continuing???
                        try{
                            myFeed.Add(chunk, cancellationTokenSource.Token);
                        }
                        catch (InvalidOperationException) {
                            Logger.Append(Severity.ERROR, "Timeout waiting for storage node #" + s.ToNode.Id + " : A session error occured, unable to use a new session to process chunk (queue is closed)");
                            backup.AddHubNotificationEvent(811, chunk.Name, "Timeout waiting for storage node #" + s.ToNode.Id + " : A session error occured, unable to use a new session to process chunk (queue is closed)");
                        }
                    }
                    if (chunk.SentEvent != null)
                    {
                        chunk.SentEvent.Dispose();
                    }
                }
                catch (System.Net.Sockets.SocketException e) {
                    // Error sending to storage node: re-queue the chunk and ask the hub for another storage session.
                    if (chunk == null)
                    {
                        return;
                    }
                    Console.WriteLine("############## Consume()	: TAKE refused for chunk " + chunk.Name + ": " + e.Message + " ---- " + e.StackTrace);
                    backup.AddHubNotificationEvent(811, chunk.Name, e.Message);
                    RemoveChunk(chunk);
                    try{
                        this.StorageNeeded(s, 1, false, true);
                        myFeed.Add(chunk, cancellationTokenSource.Token);
                    }
                    catch (InvalidOperationException ioe) {
                        Logger.Append(Severity.ERROR, "A session error occured, unable to use a new session to process chunk (queue is closed) : " + ioe.Message);
                        backup.AddHubNotificationEvent(811, chunk.Name, "A session error occured, unable to use a new session to process chunk (queue is closed)");
                    }
                }
                catch (OperationCanceledException) {
                    Logger.Append(Severity.TRIVIA, "Consumer task was cancelled, stopping...");
                    s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
                    return;
                }
                catch (InvalidOperationException) {
                    Logger.Append(Severity.DEBUG, "Consumer task has done processing its chunks list");
                }
            }
            if (!myFeed.IsCompleted)
            {
                // re-add this unfinished chunk list to the queue so the next session can process it.
                Console.WriteLine("    ----------  Consumer : feed not completed, re-adding to chunksFeeds");
                chunksFeeds.Add(myFeed, cancellationTokenSource.Token);
                Console.WriteLine("    ----------  Consumer : feed not completed, re-added, chunksFeeds count=" + chunksFeeds.Count);
            }
            s.TransfertDoneEvent -= new Session.TransfertDoneHandler(ManageChunkSent);
            Logger.Append(Severity.DEBUG, "Session #" + s.Id + " with node #" + s.ToNode.Id + ": processed and transferred all data chunks, unused budget=" + s.Budget);
        }