示例#1
0
 /// <summary>
 /// Compresses and writes out everything gathered so far, flushes the
 /// underlying stream, then resets the gather buffer for the next block.
 /// </summary>
 public override void Flush()
 {
     // Shrink the gather buffer to the bytes actually collected before
     // compressing — quicklz.compress cannot yet take an offset/length
     // pair, so a resize is the only way to hand it exact-sized input.
     Array.Resize(ref gatherBuffer, currentGatherPos);
     DoCompressAndWrite();
     inputStream.Flush();

     // Begin a fresh gather cycle with an empty, minimum-sized buffer.
     currentGatherPos = 0;
     gatherBuffer     = new byte[minBlockSize];
 }
示例#2
0
 /// <summary>Delegates the flush request to the wrapped input stream.</summary>
 public override void Flush() => inputStream.Flush();
示例#3
0
 /// <summary>Flushes only the wrapped inner stream.</summary>
 public override void Flush()
 {
     // NOTE: the compressing stream itself is deliberately NOT flushed here —
     // gz.Flush() was reported buggy (see original TODO). Revisit and restore
     // the call once the upstream bug is fixed.
     innerStream.Flush();
 }
示例#4
0
        /// <summary>
        /// Builds and transfers one backup chunk: announces the transfer, writes the
        /// chunk header, then streams the content of every regular file in
        /// <paramref name="chunk"/> through the processing pipeline
        /// (firstStream ... finalStream) in 512k reads, finally announcing the end
        /// of transfer with the resulting chunk size.
        /// </summary>
        /// <param name="chunk">Chunk whose files are read and transferred; its
        /// Size/OriginalSize and each file's metadata are updated in place.</param>
        /// <param name="maxChunkSize">Upper bound on bytes read per file in this
        /// pass; a larger file is split, with <c>FileStartPos</c> marking where a
        /// later chunk resumes reading.</param>
        internal void Process(BChunk chunk, long maxChunkSize)
        {
            // Reset the pipeline's terminal stream before reusing it for this chunk.
            finalStream.SetLength(0);
            //privilegesManager.Grant();
            try{
                storageSession.AnnounceChunkBeginTransfer(chunk.Name, this.HeaderLength);
                sessionStream.Write(headerData, 0, headerData.Length);
            }
            catch (Exception ioe) {
                // NOTE(review): on header-send failure this only rethrows when an
                // InnerException exists; otherwise it logs and falls through, so
                // processing continues for a chunk whose header may never have been
                // sent — confirm that is intentional. Also, throwing the inner
                // exception directly resets its stack trace.
                storageSession.LoggerInstance.Log(Severity.ERROR, "network I/O error : " + ioe.Message /*+"---"+ioe.StackTrace*/);
                backup.AddHubNotificationEvent(904, "", ioe.Message);
                if (ioe.InnerException != null)
                {
                    throw(ioe.InnerException);
                }
            }
            chunk.Size = headerData.Length;
            DateTime startChunkBuild = DateTime.Now;
            Stream   fs = null;

            byte[] content = new byte[1024 * 512]; // read 512k at once
            long   sent    = 0;                    // to know chunk final size (after pipeling processing streams)

            // Stream every regular file of the chunk; non-files (dirs, symlinks...)
            // and null entries are skipped.
            foreach (IFile file in chunk.Files)
            {
                if (file == null || file.Kind != FileType.File)
                {
                    continue;
                }
                // if at this stage IFile already has metadata, it does not need to be processed:
                //  we are running a diff/incr, and file has only got metadata change, or has been renamed/moved/deleted

                /*if(file.BlockMetadata.BlockMetadata.Count >0){
                 *      if(!(file.BlockMetadata.BlockMetadata[0] is ClientDedupedBlock)){
                 *      Console.WriteLine ("Process() : detected early metadata, file "+file.FileName+" has been renamed/deleted/metadataonlychanged");
                 *      continue;
                 *      }
                 * }*/
                long offset     = file.FileStartPos;           // if a file is split into multiple chunks, start reading at required filepart pos
                long remaining  = file.FileSize;
                int  read       = 0;
                int  reallyRead = 0;                           // bytes of THIS file read into THIS chunk (capped by maxChunkSize)
                try{
                    fs = file.OpenStream(FileMode.Open);
                    long seeked = fs.Seek(offset, SeekOrigin.Begin);
                    //Console.WriteLine ("Process() : seeked "+seeked+"/"+offset);
                    if (seeked != offset)
                    {
                        // Seek failure is reported but not fatal: reading proceeds
                        // from wherever the stream actually landed.
                        storageSession.LoggerInstance.Log(Severity.ERROR, "Unable to seek to required position ( reached " + seeked + " instead of " + offset + ") in file " + file.SnapFullPath);
                        backup.AddHubNotificationEvent(912, file.SnapFullPath, "Seek error");
                    }
                    // we read from snapshot but we need to set original file path:
                    // TODO !!! change back filename to original path (remove snapshotteds path)
                    //file.FileName = file.FileName.Replace(snapPath, bPath);
                }
                catch (Exception e) {
                    // Open/seek failed: log, notify, and move on to the next file.
                    // NOTE(review): fs may still be null here (OpenStream threw);
                    // the resulting NullReferenceException from fs.Close() is
                    // silently swallowed by the empty catch below.
                    storageSession.LoggerInstance.Log(Severity.ERROR, "Unable to open file " + file.SnapFullPath + ": " + e.Message);
                    backup.AddHubNotificationEvent(912, file.SnapFullPath, e.Message);
                    try{
                        fs.Close();
                    }catch {}
                    //chunk.RemoveItem(file); // we don't want a failed item to be part of the backup index
                    continue;
                }
                try{
                    // Read-and-forward loop; stops at EOF, at the per-chunk size
                    // cap, or on a cancellation request.
                    while ((read = fs.Read(content, 0, content.Length)) > 0 && reallyRead <= maxChunkSize && !cancelRequested)
                    {
                        // if file has to be splitted, take care to read no more than maxchunksize
                        if (reallyRead + read > maxChunkSize)
                        {
                            // Trim the final write so exactly maxChunkSize bytes of
                            // this file land in this chunk.
                            read = (int)maxChunkSize - reallyRead;
                            if (read == 0)
                            {
                                break;
                            }
                        }
                        remaining  -= read;
                        offset     += read;
                        reallyRead += read;
                        //try{
                        firstStream.Write(content, 0, read);
                        //}

                        /*catch(Exception e){
                         *      storageSession.LoggerInstance.Log (Severity.ERROR, "Could not write to pipeline streams : "+e.Message+" --- \n"+e.StackTrace);
                         *      if(e.InnerException != null)
                         *              storageSession.LoggerInstance.Log (Severity.ERROR, "Could not write to pipeline streams, inner stack trace : "+e.InnerException.Message+" ---- \n"+e.InnerException.StackTrace);
                         *      throw(e);
                         * }*/
                        sent += read;
                        //fs.Seek(offset, SeekOrigin.Begin);
                    }
                    // now we correct FileSize with REAL size (which includes Alternate Streams on NT)
                    // TODO report to hub
                    // TODO 2: if total file size is < than expected, file has changed too.
                    if (offset > file.FileSize /*&& Utilities.PlatForm.IsUnixClient()*/)
                    {
                        // More bytes than the snapshot-recorded size: the file grew
                        // (or has NT alternate streams) — flag as changed during backup.
                        Logger.Append(Severity.WARNING, "TODO:report File '" + file.SnapFullPath + "' has changed during backup : expected size " + file.FileSize + ", got " + offset);
                        backup.AddHubNotificationEvent(903, file.SnapFullPath, "");
                    }
                    //Console.WriteLine("Built : file="+file.FileName+", size="+file.FileSize+", read="+offset+"\n");
                    file.FileSize = offset;
                    // Drain pipeline metadata for this file, then claim it: the
                    // accumulated block metadata is attached to the file and cleared
                    // from the terminal stream for the next file.
                    firstStream.FlushMetadata();
                    file.BlockMetadata.BlockMetadata.AddRange(finalStream.BlockMetadata);
                    //Console.WriteLine ("file "+file.FileName+" has "+file.BlockMetadata.BlockMetadata.Count+" metadata blocks");
                    finalStream.BlockMetadata.Clear();
                    fs.Close();

                    chunk.OriginalSize += reallyRead;
                }
                catch (Exception ioe) {
                    fs.Close();
                    if (ioe.InnerException is SocketException)
                    {
                        // A network failure is fatal for the whole chunk.
                        // NOTE(review): throwing the InnerException directly resets
                        // its stack trace.
                        storageSession.LoggerInstance.Log(Severity.ERROR, "I/O error, could not process file " + file.SnapFullPath + " of chunk " + chunk.Name + ": " + ioe.Message /*+"---"+ioe.StackTrace*/);
                        backup.AddHubNotificationEvent(904, file.SnapFullPath, ioe.Message);
                        throw(ioe.InnerException);
                    }
                    else
                    {
                        // Any other failure only skips this file.
                        storageSession.LoggerInstance.Log(Severity.ERROR, "Could not process file " + file.SnapFullPath + " of chunk " + chunk.Name + ": " + ioe.Message + "---" + ioe.StackTrace);
                        backup.AddHubNotificationEvent(912, file.SnapFullPath, ioe.Message);
                        continue;
                    }
                }
            }                   // end foreach ifile

            // Flush the whole pipeline so finalStream.Length reflects every byte
            // produced for this chunk.
            firstStream.Flush();
            finalStream.Flush();
            DateTime endChunkBuild = DateTime.Now;
            TimeSpan duration      = endChunkBuild - startChunkBuild;

            chunk.Size += finalStream.Length;
            //chunk.Size += sessionStream.Length;
#if DEBUG
            // BENCHMARK mode announces a zero size so the hub ignores the transfer.
            if (ConfigManager.GetValue("BENCHMARK") != null)
            {
                storageSession.AnnounceChunkEndTransfer(chunk.Name, 0);
            }
            else
            {
                storageSession.AnnounceChunkEndTransfer(chunk.Name, chunk.Size);
            }
#else
            storageSession.AnnounceChunkEndTransfer(chunk.Name, chunk.Size);
#endif
            //privilegesManager.Revoke();
            storageSession.LoggerInstance.Log(Severity.DEBUG, "Processed and transferred " + chunk.Name + ", original size=" + chunk.OriginalSize / 1024 + "k, final size=" + chunk.Size / 1024 + "k, " + chunk.Files.Count + " files in " + duration.Seconds + "." + duration.Milliseconds + " s, " + Math.Round((chunk.OriginalSize / 1024) / duration.TotalSeconds, 0) + "Kb/s");
        }
示例#5
0
 /// <summary>Delegates the flush request to the wrapped inner stream.</summary>
 public override void Flush() => innerStream.Flush();