Example no. 1
 public override RC Write(byte[] buffer, int amount, long offset)
 {
     // An in-memory journal file should only ever be appended to. Random access writes are not required by sqlite.
     Debug.Assert(offset == _endpoint.Offset);
     var b = 0;
     while (amount > 0)
     {
         var chunk = _endpoint.Chunk;
         var chunkOffset = (int)(_endpoint.Offset % JOURNAL_CHUNKSIZE);
         var space = Math.Min(amount, JOURNAL_CHUNKSIZE - chunkOffset);
         if (chunkOffset == 0)
         {
             // new chunk is required to extend the file.
             var newChunk = new FileChunk();
             if (newChunk == null)
                 return RC.IOERR_NOMEM;
             newChunk.Next = null;
             if (chunk != null) { Debug.Assert(First != null); chunk.Next = newChunk; }
             else { Debug.Assert(First == null); First = newChunk; }
             _endpoint.Chunk = newChunk;
         }
         Buffer.BlockCopy(buffer, b, _endpoint.Chunk.Chunk, chunkOffset, space);
         b += space;
         amount -= space;
         _endpoint.Offset += space;
     }
     return RC.OK;
 }
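For context, here is a minimal sketch of the matching sequential-read path over this chunk list, assuming the same shapes used above (FileChunk exposing Next and a fixed-size Chunk buffer, First as the list head, JOURNAL_CHUNKSIZE as the chunk size); it is not the port's actual Read implementation.

 // Hypothetical read counterpart for the in-memory journal above.
 public RC ReadSketch(byte[] buffer, int amount, long offset)
 {
     var chunk = First;
     // Walk the linked list to the chunk containing 'offset'.
     for (var skip = offset; skip >= JOURNAL_CHUNKSIZE; skip -= JOURNAL_CHUNKSIZE)
         chunk = chunk.Next;
     var b = 0;
     var chunkOffset = (int)(offset % JOURNAL_CHUNKSIZE);
     while (amount > 0 && chunk != null)
     {
         var space = Math.Min(amount, JOURNAL_CHUNKSIZE - chunkOffset);
         Buffer.BlockCopy(chunk.Chunk, chunkOffset, buffer, b, space);
         b += space;
         amount -= space;
         chunkOffset = 0;
         chunk = chunk.Next;
     }
     return RC.OK;
 }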
Example no. 2
        public ChunkedFile(Stream fileStream, long fileSize, long chunkSize)
        {
            long chunkCount = fileSize / chunkSize;

            if (chunkCount < 1) {
                Chunks = null;
            } else {
                Chunks = new FileChunk[chunkCount];

                Console.WriteLine("Filesize of {0} gives {1} chunks.", fileSize, chunkCount);

                byte[] data = new byte[chunkSize];
                for (long i = 0; i < chunkCount; i++) {
                    // Stream.Read may return fewer bytes than requested; loop until the chunk is full.
                    int total = 0;
                    while (total < (int)chunkSize) {
                        int read = fileStream.Read(data, total, (int)chunkSize - total);
                        if (read == 0) throw new EndOfStreamException("Unexpected end of file in chunk " + i);
                        total += read;
                    }
                    Chunks[i] = new FileChunk();
                    Chunks[i].Offset = i * chunkSize;
                    CalculateChecksum(data, 0, chunkSize, Chunks[i].Checksum);
                }
                data = null;

                Array.Sort(Chunks);

                /*foreach (var c in Chunks) {
                    Console.WriteLine("Mem: {0} Chk: {1}", c.Offset, c.Checksum.ToString());
                    Console.ReadLine();
                }*/
            }
        }
Example no. 3
 void ReceiveFileChunk(IMail mail, FileChunk chunk)
 {
     using (var fs = File.OpenWrite(chunk.Name))
     {
         fs.Position = chunk.Position;
         fs.Write(chunk.Bytes, 0, chunk.Bytes.Length);
     }
 }
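Because File.OpenWrite opens with FileMode.OpenOrCreate and the stream position can be set freely, chunks may arrive in any order; a write positioned past the current end simply extends the file. A small illustration of that property (the file name and bytes are made up; the IMail argument is unused by the handler, so null suffices here):

 // Out-of-order delivery: the second call lands earlier in the file than the first.
 ReceiveFileChunk(null, new FileChunk { Name = "demo.bin", Position = 4, Bytes = new byte[] { 5, 6, 7, 8 } });
 ReceiveFileChunk(null, new FileChunk { Name = "demo.bin", Position = 0, Bytes = new byte[] { 1, 2, 3, 4 } });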
Example no. 4
        /*
        ** Write data to the file.
        */
        static int memjrnlWrite(
            sqlite3_file pJfd,  /* The journal file into which to write */
            byte[] zBuf,        /* Take data to be written from here */
            int iAmt,           /* Number of bytes to write */
            sqlite3_int64 iOfst /* Begin writing at this offset into the file */
            )
        {
            MemJournal p      = (MemJournal)pJfd;
            int        nWrite = iAmt;

            byte[] zWrite  = zBuf;
            int    izWrite = 0;

            /* An in-memory journal file should only ever be appended to. Random
            ** access writes are not required by sqlite.
            */
            Debug.Assert(iOfst == p.endpoint.iOffset);
            UNUSED_PARAMETER(iOfst);

            while (nWrite > 0)
            {
                FileChunk pChunk       = p.endpoint.pChunk;
                int       iChunkOffset = (int)(p.endpoint.iOffset % JOURNAL_CHUNKSIZE);
                int       iSpace       = MIN(nWrite, JOURNAL_CHUNKSIZE - iChunkOffset);

                if (iChunkOffset == 0)
                {
                    /* New chunk is required to extend the file. */
                    FileChunk pNew = new FileChunk();// sqlite3_malloc( sizeof( FileChunk ) );
                    if (null == pNew)
                    {
                        return(SQLITE_IOERR_NOMEM);
                    }
                    pNew.pNext = null;
                    if (pChunk != null)
                    {
                        Debug.Assert(p.pFirst != null);
                        pChunk.pNext = pNew;
                    }
                    else
                    {
                        Debug.Assert(null == p.pFirst);
                        p.pFirst = pNew;
                    }
                    p.endpoint.pChunk = pNew;
                }

                Buffer.BlockCopy(zWrite, izWrite, p.endpoint.pChunk.zChunk, iChunkOffset, iSpace); //memcpy( &p.endpoint.pChunk.zChunk[iChunkOffset], zWrite, iSpace );
                izWrite            += iSpace;                                                      //zWrite += iSpace;
                nWrite             -= iSpace;
                p.endpoint.iOffset += iSpace;
            }

            return(SQLITE_OK);
        }
Example no. 5
 public FileChunk Compress(FileChunk inputChunk)
 {
     using (var outputStream = new MemoryStream())
     {
         using (var compressionStream = new GZipStream(outputStream, CompressionMode.Compress, true))
         {
             compressionStream.Write(inputChunk.Content, 0, inputChunk.Content.Length);
         }
         return(new FileChunk(inputChunk.Id, outputStream.ToArray()));
     }
 }
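A round-trip sketch pairing this method with a plain GZipStream decompression pass (the decompressing counterpart appears in Example no. 28). It assumes the FileChunk(id, byte[]) constructor and Content property used above; 'compressor' stands for an instance of the class declaring Compress.

 // Hypothetical round-trip check for the GZip compressor above.
 var original = new FileChunk(1, Encoding.UTF8.GetBytes("hello chunk"));
 var compressed = compressor.Compress(original);
 using (var gz = new GZipStream(new MemoryStream(compressed.Content), CompressionMode.Decompress))
 using (var restored = new MemoryStream())
 {
     gz.CopyTo(restored);
     Debug.Assert(restored.ToArray().SequenceEqual(original.Content));
 }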
Example no. 6
        public void AddChunkPeer(FileChunk chunk, IPAddress address)
        {
            var chunkPeer = new ChunkPeer {
                Chunk = chunk.FileName, IP = address.Address
            };

            lock (_sync)
            {
                _chunkPeers.Add(chunkPeer, new Nothing());
            }
        }
Example no. 7
        private async Task Incoming(HttpListenerContext context)
        {
            try
            {
                _tracingHttpServerSource.TraceEvent(TraceEventType.Verbose, TRACEEVENT_RECEIVED);
                Interlocked.Increment(ref _requestCount);

                HttpClientImpl client = new HttpClientImpl(context)
                {
                    StartTicks = _timer.ElapsedTicks
                };

                var currentActive = Interlocked.Increment(ref _activeCount);
                var maxActive     = Interlocked.Read(ref _maxActive);
                maxActive = Math.Max(currentActive, maxActive);
                Interlocked.Exchange(ref _maxActive, maxActive); // it is possible to overwrite a higher value on a different thread but it's good enough

                byte[] buffer = new byte[1024];
                int    read   = 0;

                ChunkBase root = null;

                if (Path.GetExtension(context.Request.RawUrl) == "")
                {
                    client.Context.Response.ContentType = "application/json";
                    root = new RESTChunk(this);
                }
                else if (context.Request.RawUrl.EndsWith(".page"))
                {
                    client.Context.Response.ContentType = "text/html";
                    root = new PageChunk(this, null);
                }
                else
                {
                    client.Context.Response.ContentType = "application/octet-stream"; // "application/octet" is not a registered MIME type
                    root = new FileChunk(this);
                }

                await root.Send(client, null);

                client.StopTicks = _timer.ElapsedTicks;

                Interlocked.Add(ref _tickCount, client.TimetoLastByte);
            }
            catch (Exception ex)
            {
                _tracingHttpServerSource.TraceData(TraceEventType.Error, TRACEEVENT_ERROR, ex);
            }
            finally
            {
                Interlocked.Increment(ref _requestCompleteCount);
                Interlocked.Decrement(ref _activeCount);
            }
        }
Example no. 8
        internal async Task <UploadResult> PutFileHttpContext(string id, HttpContextWrapper httpContextWrapper)
        {
            var fileId   = Models.File.ConvertToStoreId(id);
            var fileHead = await _filesRepo.GetFileHead(fileId);

            if (fileHead == null)
            {
                return(new UploadResult
                {
                    FileId = null,
                    Status = HttpStatusCode.NotFound
                });
            }
            int startingChunkNumber = fileHead.ChunkCount + 1;
            var chunk = new FileChunk()
            {
                ChunkNum = startingChunkNumber,
                FileId   = fileHead.FileId
            };

            long fileSize;

            try
            {
                using (var stream = httpContextWrapper.Request.GetBufferlessInputStream())
                {
                    var isMultipart = Request.Content.IsMimeMultipartContent();
                    if (isMultipart)
                    {
                        fileSize = await PutFileMultipart(stream, chunk);
                    }
                    else
                    {
                        fileSize = await PostFileInChunks(stream, chunk);
                    }
                }
                await _filesRepo.UpdateFileHead(chunk.FileId, fileHead.FileSize + fileSize, chunk.ChunkNum - 1);
            }
            catch
            {
                // Delete all chunks after the starting chunk of this PUT.
                await DeleteFileChunks(chunk.FileId, startingChunkNumber);

                throw;
            }

            await _log.LogVerbose(WebApiConfig.LogSourceFiles, $"PUT:{id}, Chunks were added in PUT. Total chunks in file:{chunk.ChunkNum - 1}");

            return(new UploadResult
            {
                FileId = chunk.FileId,
                Status = HttpStatusCode.OK
            });
        }
Example no. 9
        public void OnNext(PutChunkMessage msg)
        {
            var fileChunk = new FileChunk(msg.FileId, msg.ChunkNo);

            Core.Instance.ChunkPeers.SetWantedReplicationDegree(fileChunk, msg.ReplicationDeg);

            Task.Delay(Core.Instance.RandomDelay).Wait();

            // file already exists, no need to verify sizes or rep degree
            if (fileChunk.Exists())
            {
                Core.Instance.MCChannel.Send(new StoredMessage(fileChunk));
                return;
            }

            var count = Core.Instance.ChunkPeers.CountChunkPeer(fileChunk);

            if (count >= msg.ReplicationDeg)
            {
                Core.Instance.Log.InfoFormat("EnhancedBackupChunkService: Not storing {0}#{1} because replication degree " +
                                             "has been ensured by other peers (got {2}, wanted {3})",
                                             msg.FileId.ToStringSmall(), msg.ChunkNo, count, msg.ReplicationDeg);
                return;
            }

            var dirSize = Utilities.Utilities.GetDirectorySize(Core.Instance.Config.BackupDirectory);

            if (dirSize + msg.Body.Length > Core.Instance.Config.MaxBackupSize)
            {
                Core.Instance.Log.InfoFormat(
                    "EnhancedBackupChunkService:OnNext: Got no space to store {0}, trying to evict some other chunks", fileChunk);
                new SpaceReclaimingProtocol(false).Run().Wait();

                dirSize = Utilities.Utilities.GetDirectorySize(Core.Instance.Config.BackupDirectory);
                if (dirSize + msg.Body.Length > Core.Instance.Config.MaxBackupSize)
                {
                    Core.Instance.Log.InfoFormat(
                        "EnhancedBackupChunkService:OnNext: Really have no space to store any file. Giving up on storing {0}",
                        fileChunk);
                    return;
                }
            }

            var stored = fileChunk.SetData(msg.Body);

            if (!stored.HasValue)
            {
                Core.Instance.Log.ErrorFormat("EnhancedBackupChunkService: Could not store file {0}", fileChunk);
                return;
            }
            // otherwise file is already created

            Core.Instance.MCChannel.Send(new StoredMessage(fileChunk));
        }
Example no. 10
        public async Task SendFileChunk(Recipient recipient, FileChunk fileChunk)
        {
            var httpClient = _httpClientFactory.CreateClient("default");

            var address = $"{recipient.Address}/api/file/{fileChunk.FileId}/chunk/{fileChunk.SequenceNo}/{Environment.MachineName}";
            var content = new ByteArrayContent(fileChunk.GetValue());

            content.Headers.ContentType = new MediaTypeHeaderValue("application/octet-stream");
            var response = await httpClient.PostAsync(address, content);

            response.EnsureSuccessStatusCode();
        }
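The address embeds the file id, sequence number, and sender host. A hypothetical ASP.NET Core endpoint matching that route shape (not the project's actual controller; _store and SaveChunk are assumed names) could receive it like this:

        // Hypothetical receiving side for the POST issued above.
        [HttpPost("api/file/{fileId}/chunk/{sequenceNo}/{host}")]
        public async Task<IActionResult> ReceiveChunk(string fileId, int sequenceNo, string host)
        {
            using (var buffer = new MemoryStream())
            {
                // The sender posts the raw chunk bytes as application/octet-stream.
                await Request.Body.CopyToAsync(buffer);
                await _store.SaveChunk(fileId, sequenceNo, host, buffer.ToArray()); // assumed store abstraction
            }
            return Ok();
        }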
Example no. 11
        protected override async Task ExecuteFunctionAsync(Stream stream)
        {
            // Gets current part's header information
            var fileName = MultipartPartParser.Filename.Replace("\"", string.Empty).Replace("%20", " ");
            var fileType = MultipartPartParser.ContentType;

            await _log.LogVerbose(WebApiConfig.LogSourceFiles, $"POST: Posting first multi-part file {fileName}");

            _fileChunk = await _function(fileName, fileType, MultipartPartParser, _expired);

            await _log.LogVerbose(WebApiConfig.LogSourceFiles, $"POST: Chunks posted {_fileChunk.ChunkNum - 1}");
        }
Example no. 12
        public void OnNext(StoredMessage msg)
        {
            var fc = new FileChunk(msg.FileId, msg.ChunkNo);

            if (!Core.Instance.ChunkPeers.HasChunkPeer(fc, msg.RemoteEndPoint.Address))
            {
                if (Core.Instance.ChunkPeers.GotWantedReplicationDegree(fc))
                {
                    Core.Instance.ChunkPeers.AddChunkPeer(fc, msg.RemoteEndPoint.Address);
                }
            }
        }
Example no. 13
        public async Task <int> PostFileChunk(FileChunk chunk)
        {
            var prm = new DynamicParameters();

            prm.Add("@FileId", chunk.FileId);
            prm.Add("@ChunkNum", chunk.ChunkNum);
            prm.Add("@ChunkSize", chunk.ChunkSize);
            prm.Add("@ChunkContent", chunk.ChunkContent);
            await _connectionWrapper.ExecuteAsync("[FileStore].InsertFileChunk", prm, commandTimeout : _commandTimeout, commandType : CommandType.StoredProcedure);

            return(chunk.ChunkNum + 1);
        }
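Because PostFileChunk returns chunk.ChunkNum + 1, a caller can thread the returned number through a loop while streaming a file into the store. A sketch under that assumption; ReadNextChunk is a hypothetical helper returning null at end of stream.

        // Hypothetical caller that uploads a file chunk by chunk.
        var chunk = new FileChunk { FileId = fileId, ChunkNum = 1 };
        byte[] content;
        while ((content = ReadNextChunk(stream)) != null) // hypothetical helper
        {
            chunk.ChunkContent = content;
            chunk.ChunkSize    = content.Length;
            chunk.ChunkNum     = await PostFileChunk(chunk);
        }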
Example no. 14
        private void LoadInternal(eOpenType xiOpenType, string xiFilename)
        {
            Chunk  lNewRootChunk  = null;
            string lExceptionWhen = "opening file";

            try
            {
                using (FileStream fs = File.OpenRead(xiFilename))
                {
                    lExceptionWhen = "deserialising the file";
                    switch (xiOpenType)
                    {
                    case eOpenType.LevelBinary:
                        lNewRootChunk = new Level(fs);
                        break;

                    case eOpenType.UnknownBinary:
                        lNewRootChunk = new FileChunk(fs);
                        break;

                    case eOpenType.Mmv:
                        lNewRootChunk = new VersionList(fs);
                        break;

                    case eOpenType.Xml:
                        XmlSerializer xs = new XmlSerializer(typeof(Chunk));
                        lNewRootChunk = (Chunk)xs.Deserialize(fs);
                        break;

                    default: throw new Exception("unreachable case");
                    }

                    if (fs.Length != fs.Position)
                    {
                        //check the whole file has been read
                        throw new DeserialisationException(string.Format("Deserialisation terminated early at byte {0} of {1}", fs.Position, fs.Length));
                    }
                }
            }
            catch (Exception err)
            {
                Trace.WriteLine(err);
                MessageBox.Show(string.Format("Exception occurred while {0}: {1}", lExceptionWhen, err.Message), "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
                return;
            }
            // level loaded OK, now fill tree:
            RootChunk = lNewRootChunk;
            mLocalSettings.LastOpenedFile = xiFilename;
            mLocalSettings.LastOpenedType = xiOpenType;
            mCurrentFile     = xiFilename;
            mCurrentFileMode = xiOpenType == eOpenType.Mmv ? eSaveMode.Mmv : xiOpenType == eOpenType.Xml ? eSaveMode.Xml : eSaveMode.Binary;
        }
Example no. 15
        internal async Task <long> PutFileMultipart(Stream stream, FileChunk chunk)
        {
            using (var putFileMultipart = new PutMultipartReader(stream, chunk, PostFileInChunks, _log))
            {
                await putFileMultipart.ReadAndExecuteRequestAsync();

                var fileSize = putFileMultipart.GetFileSize();
                if (fileSize.HasValue)
                {
                    return(fileSize.Value);
                }
                throw new Exception("File size does not have a value after executing the PUT");
            }
        }
Example no. 16
        public void ProcessTaskTest_WorksFine()
        {
            // arrange
            var inputFilePath  = "inputPath";
            var outputFilePath = "outputPath";
            var filePaths      = new FilePaths
            {
                InputFilePath  = inputFilePath,
                OutputFilePath = outputFilePath
            };

            var taskNumber            = 1;
            var resetEvent            = new ManualResetEventSlim();
            Func <int, bool> callback = number => true;
            var taskSyncParams        = new TaskSynchronizationParams
            {
                TaskNumber     = taskNumber,
                ResetEvent     = resetEvent,
                CanProceedFunc = callback
            };
            var startPosition = 10;
            var readBytes     = 200;
            var fileChunk     = new FileChunk
            {
                StartPosition = startPosition,
                ReadBytes     = readBytes
            };

            var operation = new Mock <IOperation>();
            var gzipTask  = new GZipTask(filePaths, taskSyncParams, fileChunk, operation.Object);

            var fileChunkRead      = new byte[] { 0, 100, 120, 210, 255 };
            var fileChunkProcessed = new byte[] { 10, 11, 12 };

            // expectations
            fileManager.Setup(_ => _.ReadFileChunk(inputFilePath, startPosition, readBytes))
            .Returns(fileChunkRead);

            operation.Setup(_ => _.ProcessChunk(fileChunkRead))
            .Returns(fileChunkProcessed);

            fileManager.Setup(_ => _.WriteFileChunk(outputFilePath, fileChunkProcessed))
            .Verifiable();

            // act
            target.ProcessTask(gzipTask);

            // assert
            fileManager.VerifyAll();
        }
Example no. 17
        public void OnNext(RemovedMessage msg)
        {
            var fileChunk = new FileChunk(msg.FileId, msg.ChunkNo);

            if (!Core.Instance.ChunkPeers.RemoveChunkPeer(fileChunk, msg.RemoteEndPoint.Address))
            {
                return; // we don't have this file, ignore
            }
            int wantedDegree, actualDegree;

            Core.Instance.ChunkPeers.TryGetDegrees(fileChunk, out wantedDegree, out actualDegree);
            if (actualDegree >= wantedDegree)
            {
                return; // replication degree is still satisfied; nothing to do
            }
            try
            {
                var putChunkReceived = false;
                var disposable       = Core.Instance.MDBChannel.Received
                                       .Where(message => message.MessageType == MessageType.PutChunk)
                                       .Cast <PutChunkMessage>()
                                       .Where(message => message.ChunkNo == fileChunk.ChunkNo &&
                                              message.FileId == fileChunk.FileId)
                                       .Subscribe(_ => putChunkReceived = true);

                Task.Delay(Core.Instance.RandomDelay).Wait(); // random delay uniformly distributed

                if (!putChunkReceived)
                {
                    var data = fileChunk.GetData();
                    if (data == null)
                    {
                        Core.Instance.Log.ErrorFormat(
                            "SpaceReclaimingService: Could not start BackupChunkProtocol" +
                            " for {0} because it no longer exists here.", fileChunk);
                    }
                    else
                    {
                        new BackupChunkSubprotocol(fileChunk, wantedDegree, data).Run();
                    }
                }

                disposable.Dispose();
            }
            catch (Exception ex)
            {
                Core.Instance.Log.Error("SpaceReclaimingService", ex);
            }
        }
Example no. 18
 static IEnumerable<FileChunk> Chunks(string name, Stream stream)
 {
     var r = new BinaryReader(stream);
     while (r.BaseStream.Position != r.BaseStream.Length)
     {
         var chunk = new FileChunk
         {
             Name = name,
             Position = r.BaseStream.Position,
             Bytes = r.ReadBytes(
                 Math.Min(4 * 1024 * 1024, (int)(r.BaseStream.Length - r.BaseStream.Position))),
         };
         yield return chunk;
     }
 }
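These 4 MiB chunks carry the Name/Position/Bytes fields consumed by ReceiveFileChunk in Example no. 3, which seeks to chunk.Position before writing, so they can be delivered in any order. A sending-side sketch, assuming some 'transport' abstraction with a Send(FileChunk) method:

 // Hypothetical sender built on the Chunks() iterator above.
 using (var stream = File.OpenRead(path))
 {
     foreach (var chunk in Chunks(Path.GetFileName(path), stream))
         transport.Send(chunk); // 'transport' is an assumed abstraction
 }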
Example no. 19
        private static bool DeleteChunk(FileChunk chunk)
        {
            if (!chunk.Delete())
            {
                Core.Instance.Log.ErrorFormat("SpaceReclaimingProtocol: Could not delete file {0}", chunk);
                return(false);
            }

            // Not updating actualDegree here. Will be done when we receive our own REMOVED.

            var msg = new RemovedMessage(chunk);

            Core.Instance.MCChannel.Send(msg);
            return(true);
        }
Example no. 20
        /// <summary> Compression task logic implementation </summary>
        /// <param name="filePaths"> Input and output file paths </param>
        /// <param name="taskSynchronizationParams"> Parameters needed to organize thread task synchronization </param>
        /// <param name="fileChunk"> Contains parameters needed during compression logic </param>
        /// <param name="operation"> Process operation type </param>
        public GZipTask(FilePaths filePaths,
                        TaskSynchronizationParams taskSynchronizationParams,
                        FileChunk fileChunk,
                        IOperation operation)
        {
            Guard.NotNull(filePaths, $"{nameof(filePaths)}");
            Guard.NotNull(taskSynchronizationParams, $"{nameof(taskSynchronizationParams)}");
            Guard.NotNull(fileChunk, $"{nameof(fileChunk)}");
            Guard.NotNull(operation, $"{nameof(operation)}");

            FilePathParams            = filePaths;
            TaskSynchronizationParams = taskSynchronizationParams;
            FileChunk = fileChunk;
            Operation = operation;
        }
Example no. 21
        public void CreateTest_WorksFine()
        {
            // arrange
            var pathParams = new FilePaths();
            var syncParams = new TaskSynchronizationParams();
            var chunk      = new FileChunk();
            var operation  = new Mock <IOperation>();
            var expected   = new GZipTask(pathParams, syncParams, chunk, operation.Object);

            // act
            var actual = target.Create(pathParams, syncParams, chunk, operation.Object);

            // assert
            actual.Should().BeEquivalentTo(expected);
        }
Example no. 22
        /// <summary>
        /// Merges the uploaded chunk files into a single file.
        /// </summary>
        /// <param name="chunk">The chunk whose file name identifies the upload to merge.</param>
        /// <returns>A task that completes when the merge has finished.</returns>
        private async Task MergeChunkFile(FileChunk chunk)
        {
            // Upload directory
            var path = AppDomain.CurrentDomain.BaseDirectory + "Files\\" + chunk.FileName;
            // Naming convention for the chunk files
            var partToken = FileSort.PART_NUMBER;
            // Actual name of the uploaded file
            var baseFileName = chunk.FileName.Substring(0, chunk.FileName.IndexOf(partToken));
            // Find all chunk files in the target directory that match the naming convention
            var searchPattern = $"{Path.GetFileName(baseFileName)}{partToken}*";
            // Get the chunk files
            var fileList = Directory.GetFiles(Path.GetDirectoryName(path), searchPattern);

            if (!fileList.Any())
            {
                return;
            }

            var mergeFiles = new List <FileSort>();

            foreach (var file in fileList)
            {
                var sort = new FileSort
                {
                    FileName = file
                };
                baseFileName = file.Substring(0, file.IndexOf(partToken));
                var fileIndex = file.Substring(file.IndexOf(partToken) + partToken.Length);
                int.TryParse(fileIndex, out var number);
                sort.PartNumber = number;
                mergeFiles.Add(sort);
            }
            // Sort all chunks by part number
            mergeFiles = mergeFiles.OrderBy(x => x.PartNumber).ToList();
            // Merge the files
            using (var fileStream = new FileStream(baseFileName, FileMode.Create))
            {
                foreach (var fileSort in mergeFiles)
                {
                    using (FileStream fileChunk = new FileStream(fileSort.FileName, FileMode.Open))
                    {
                        await fileChunk.CopyToAsync(fileStream);
                    }
                }
            }
            // Delete the chunk files
            DeleteFile(mergeFiles);
        }
Example no. 23
        public async Task <IActionResult> Upload([FromQuery] FileChunk chunk)
        {
            if (!IsMultipartContentType(context.Request.ContentType))
            {
                return(BadRequest());
            }

            var boundary = GetBoundary();

            if (string.IsNullOrEmpty(boundary))
            {
                return(BadRequest());
            }

            var reader = new MultipartReader(boundary, context.Request.Body);

            var section = await reader.ReadNextSectionAsync();

            while (section != null)
            {
                var buffer   = new byte[chunk.Size];
                var fileName = GetFileName(section.ContentDisposition);
                chunk.FileName = fileName;
                var path = Path.Combine(_environment.WebRootPath, DEFAULT_FOLDER, fileName);
                using (var stream = new FileStream(path, FileMode.Append))
                {
                    int bytesRead;
                    do
                    {
                        bytesRead = await section.Body.ReadAsync(buffer, 0, buffer.Length);

                        stream.Write(buffer, 0, bytesRead);
                    } while (bytesRead > 0);
                }

                section = await reader.ReadNextSectionAsync();
            }

            // Compute the uploaded size and report progress in real time (TODO)

            // Merge the chunk files (may involve transcoding, etc.)
            if (chunk.PartNumber == chunk.Chunks)
            {
                await MergeChunkFile(chunk);
            }

            return(Ok());
        }
Example no. 24
        public async Task SendFileChunk(Recipient recipient, FileChunk fileChunk)
        {
            var client = new Transmitter.TransmitterClient(_channelStore.Get(recipient.Address));
            var result = await client.SendFileChunkAsync(new FileChunkTransmissionRequest
            {
                FileId     = fileChunk.FileId.ToString(),
                SequenceNo = fileChunk.SequenceNo,
                Host       = Environment.MachineName,
                Value      = ByteString.CopyFrom(fileChunk.GetValue())
            });

            if (!result.IsSuccessful)
            {
                throw new InvalidOperationException($"'{nameof(SendFileChunk)}' received a negative 'IsSuccessful' from '{recipient.Name}'");
            }
        }
Example no. 25
 //this is a weaker test than TestSerialisationIsInvertible
 //but has been left here as it's sometimes handy for debugging
 private void TestBinaryUnkSerialisationIsInvertible(string xiFilename)
 {
     Console.Out.WriteLine("Testing file {0}", xiFilename);
     using (Stream inStr = File.OpenRead(xiFilename))
     {
         FileChunk deser = new FileChunk();
         deser.Deserialise(inStr);
         MemoryStream memStr = new MemoryStream();
         inStr.Seek(0, SeekOrigin.Begin);
         DebugOutputStreamWithExpectations outStr = new DebugOutputStreamWithExpectations(inStr, memStr);
         deser.Serialise(outStr);
         inStr.Seek(0, SeekOrigin.Begin);
         memStr.Seek(0, SeekOrigin.Begin);
         Assert.IsTrue(StreamUtils.StreamsAreEqual(inStr, memStr));
     }
 }
Example no. 26
        public async Task <HttpResponseMessage> Post()
        {
            if (!Request.Content.IsMimeMultipartContent())
            {
                throw new HttpResponseException(HttpStatusCode.UnsupportedMediaType);
            }

            // Temp storage location for File Chunks
            MultipartMemoryStreamProvider provider = new MultipartMemoryStreamProvider();
            FileChunk chunk = null;

            try
            {
                // Read all contents of multipart message into MultipartMemoryStreamProvider.
                await Request.Content.ReadAsMultipartAsync(provider);

                using (Stream fileChunkStream = await provider.Contents[0].ReadAsStreamAsync())
                {
                    //Check for not null or empty
                    if (fileChunkStream == null)
                    {
                        throw new HttpResponseException(HttpStatusCode.NotFound);
                    }

                    // Read file chunk detail
                    chunk           = provider.Contents[0].Headers.GetMetaData();
                    chunk.ChunkData = fileChunkStream.ReadFully();

                    // Upload Chunk to blob storage and store the reference in Azure Cache
                    _operations.UploadChunk(chunk);

                    // Check for the last chunk; if so, commit with a PutBlockList
                    // and remove all keys for that FileID from the dictionary.
                    if (chunk.IsCompleted)
                    {
                        _operations.CommitChunks(chunk);
                    }
                }

                // Send OK Response along with saved file names to the client.
                return(Request.CreateResponse(HttpStatusCode.OK));
            }
            catch (System.Exception e)
            {
                return(Request.CreateErrorResponse(HttpStatusCode.InternalServerError, e));
            }
        }
Example no. 27
        public override RC Truncate(long size)
        {
            SysEx.UNUSED_PARAMETER(size);
            Debug.Assert(size == 0);
            var pChunk = pFirst;

            while (pChunk != null)
            {
                var pTmp = pChunk;
                pChunk = pChunk.pNext;
                // In the C original this loop freed each chunk (sqlite3_free); under the GC it only walks the list.
            }
            // clear
            pFirst    = null;
            endpoint  = new FilePoint();
            readpoint = new FilePoint();
            return(RC.OK);
        }
Example no. 28
 // Note: despite its name, this overload decompresses the chunk content.
 public FileChunk Compress(FileChunk inputChunk)
 {
     using (var outputStream = new MemoryStream())
     {
         using (var decompressionStream = new GZipStream(new MemoryStream(inputChunk.Content), CompressionMode.Decompress, false))
         {
             byte[] buffer    = new byte[inputChunk.Content.Length];
             int    bytesRead = decompressionStream.Read(buffer, 0, buffer.Length);
             while (bytesRead > 0)
             {
                 outputStream.Write(buffer, 0, bytesRead);
                 bytesRead = decompressionStream.Read(buffer, 0, buffer.Length);
             }
         }
         return(new FileChunk(inputChunk.Id, outputStream.ToArray()));
     }
 }
Example no. 29
        protected override void Write()
        {
            using (var file = new FileStream(OutputFilePath, FileMode.CreateNew, FileAccess.Write))
            {
                FileChunk chunk = null;
                while (InputQueue.TryDequeue(out chunk))
                {
                    file.Write(chunk.Content, 0, chunk.Content.Length);

                    float progress = ((float)chunk.Id / TotalChunkCount);
                    Console.WriteLine(string.Format("{0:P} completed", progress));
                }

                Console.WriteLine("Completed");
                FinishEventHandle.Set();
            }
        }
Example no. 30
        /*
        ** Truncate the file.
        */
        static int memjrnlTruncate(sqlite3_file pJfd, sqlite3_int64 size)
        {
            MemJournal p = (MemJournal)pJfd;
            FileChunk  pChunk;

            Debug.Assert(size == 0);
            UNUSED_PARAMETER(size);
            pChunk = p.pFirst;
            while (pChunk != null)
            {
                FileChunk pTmp = pChunk;
                pChunk = pChunk.pNext;
                //sqlite3_free( ref pTmp );
            }
            sqlite3MemJournalOpen(pJfd);
            return(SQLITE_OK);
        }
Example no. 31
        public void NotEmpty_WorksFine_QueueNotEmpty()
        {
            // arrange
            var pathParams = new FilePaths();
            var syncParams = new TaskSynchronizationParams();
            var chunk      = new FileChunk();
            var operation  = new Mock <IOperation>();
            var task       = new GZipTask(pathParams, syncParams, chunk, operation.Object);

            target.Queue.Enqueue(task);

            // act
            var actual = target.NotEmpty();

            // assert
            Assert.True(actual);
        }
Example no. 32
        public void EnqueueTest_WorksFine()
        {
            // arrange
            var      pathParams = new FilePaths();
            var      syncParams = new TaskSynchronizationParams();
            var      chunk      = new FileChunk();
            var      operation  = new Mock <IOperation>();
            var      task       = new GZipTask(pathParams, syncParams, chunk, operation.Object);
            GZipTask actual;

            // act
            target.Enqueue(task);
            target.Queue.TryDequeue(out actual);

            // assert
            Assert.Equal(task, actual);
        }
Example no. 33
        public bool TryGetDegrees(FileChunk chunk, out int wantedDeg, out int actualDeg)
        {
            if (!HasChunkPeer(chunk, true))
            {
                wantedDeg = -1;
                actualDeg = -1;
                return(false);
            }

            if (!_wantedRepDegs.TryGetValue(chunk.FileName, out wantedDeg))
            {
                actualDeg = -1;
                return(false);
            }

            actualDeg = CountChunkPeer(chunk);
            return(true);
        }
Example no. 34
        private void ReceiveChunk(FileChunk chunk)
        {
            var dl = _downloads.Find(x => string.Equals(x.File, chunk.File));

            if (dl == null)
            {
                dl             = new DownloadState();
                dl.File        = chunk.File;
                dl.DataStream  = new MemoryStream();
                dl.StartTime   = Game.ElapsedTime;
                dl.Status      = TransferStatus.InProgress;
                dl.TotalLength = chunk.TotalLength;
                _downloads.Add(dl);
            }
            else
            {
                if (dl.Status != TransferStatus.InProgress)
                {
                    // something went wrong, tell host to stop?
                    return;
                }
            }

            dl.DataStream.Write(chunk.Data, 0, chunk.ChunkLength);
            dl.Offset += chunk.ChunkLength;
            dl.ChunkReceivedTimeout = MaxChunkReceivedTimeout;

            if (dl.DataStream.Length >= dl.TotalLength)
            {
                var fullPath = Path.Combine(Structure.RuntimePath, chunk.Temporary ? "temp" : "download", dl.File);
                var dirPath  = Path.GetDirectoryName(fullPath);
                Directory.CreateDirectory(dirPath);
                using (var fs = new FileStream(fullPath, FileMode.OpenOrCreate))
                {
                    fs.SetLength(0);
                    dl.DataStream.WriteTo(fs);
                }
                dl.DataStream.Close();
                dl.DataStream.Dispose();
                dl.DataStream = null;
                dl.Status     = TransferStatus.Success;
                _downloads.Remove(dl);
            }
        }
Example no. 35
 public static IEnumerable<FFMPEGCommand> Commands(EditorModel model, FileChunk chunk)
 {
     switch (chunk.Mode)
     {
         case Mode.Face:
             yield return new ExtractFaceVideoCommand
             {
                 VideoInput = /*chunk.SourceFilename,*/ model.Locations.ConvertedFaceVideo,
                 StartTime = chunk.StartTime,
                 Duration = chunk.Length,
                 VideoOutput = model.Locations.Make(model.ChunkFolder, chunk.ChunkFilename)
             };
             break;
         case Mode.Screen:
             yield return new ExtractAudioCommand
             {
                 AudioInput = /*chunk.SourceFilename,*/ model.Locations.ConvertedFaceVideo,
                 StartTime = chunk.StartTime,
                 Duration = chunk.Length,
                 AudioOutput = model.Locations.Make(model.ChunkFolder, chunk.AudioFilename)
             };
             yield return new ExtractScreenVideoCommand
             {
                 VideoInput = /*chunk.SourceFilename,*/ model.Locations.ConvertedDesktopVideo,
                 StartTime = chunk.StartTime - model.Montage.SynchronizationShift,
                 Duration = chunk.Length,
                 VideoOutput = model.Locations.Make(model.ChunkFolder, chunk.VideoFilename)
             };
             yield return new MixVideoAudioCommand
             {
                 VideoInput = model.Locations.Make(model.ChunkFolder, chunk.VideoFilename),
                 AudioInput = model.Locations.Make(model.ChunkFolder, chunk.AudioFilename),
                 VideoOutput = model.Locations.Make(model.ChunkFolder, chunk.ChunkFilename)
             };
             break;
     }
 }
Example no. 36
 public override RC Write(byte[] zBuf, int iAmt, long iOfst)
 {
     SysEx.UNUSED_PARAMETER(iOfst);
     // An in-memory journal file should only ever be appended to. Random access writes are not required by sqlite.
     Debug.Assert(iOfst == endpoint.iOffset);
     var izWrite = 0;
     while (iAmt > 0)
     {
         var pChunk = endpoint.pChunk;
         var iChunkOffset = (int)(endpoint.iOffset % JOURNAL_CHUNKSIZE);
         var iSpace = Math.Min(iAmt, JOURNAL_CHUNKSIZE - iChunkOffset);
         if (iChunkOffset == 0)
         {
             // New chunk is required to extend the file.
             var pNew = new FileChunk();
             if (pNew == null)
                 return RC.IOERR_NOMEM;
             pNew.pNext = null;
             if (pChunk != null) { Debug.Assert(pFirst != null); pChunk.pNext = pNew; }
             else { Debug.Assert(pFirst == null); pFirst = pNew; }
             endpoint.pChunk = pNew;
         }
         Buffer.BlockCopy(zBuf, izWrite, endpoint.pChunk.zChunk, iChunkOffset, iSpace);
         izWrite += iSpace;
         iAmt -= iSpace;
         endpoint.iOffset += iSpace;
     }
     return RC.OK;
 }
Example no. 37
    /*
    ** Write data to the file.
    */
    static int memjrnlWrite(
    sqlite3_file pJfd,    /* The journal file into which to write */
    byte[] zBuf,          /* Take data to be written from here */
    int iAmt,             /* Number of bytes to write */
    sqlite3_int64 iOfst   /* Begin writing at this offset into the file */
    )
    {
      MemJournal p = (MemJournal)pJfd;
      int nWrite = iAmt;
      byte[] zWrite = zBuf;
      int izWrite = 0;

      /* An in-memory journal file should only ever be appended to. Random
      ** access writes are not required by sqlite.
      */
      Debug.Assert( iOfst == p.endpoint.iOffset );
      UNUSED_PARAMETER( iOfst );

      while ( nWrite > 0 )
      {
        FileChunk pChunk = p.endpoint.pChunk;
        int iChunkOffset = (int)( p.endpoint.iOffset % JOURNAL_CHUNKSIZE );
        int iSpace = MIN( nWrite, JOURNAL_CHUNKSIZE - iChunkOffset );

        if ( iChunkOffset == 0 )
        {
          /* New chunk is required to extend the file. */
          FileChunk pNew = new FileChunk();// sqlite3_malloc( sizeof( FileChunk ) );
          if ( null == pNew )
          {
            return SQLITE_IOERR_NOMEM;
          }
          pNew.pNext = null;
          if ( pChunk != null )
          {
            Debug.Assert( p.pFirst != null );
            pChunk.pNext = pNew;
          }
          else
          {
            Debug.Assert( null == p.pFirst );
            p.pFirst = pNew;
          }
          p.endpoint.pChunk = pNew;
        }

        Buffer.BlockCopy( zWrite, izWrite, p.endpoint.pChunk.zChunk, iChunkOffset, iSpace ); //memcpy( &p.endpoint.pChunk.zChunk[iChunkOffset], zWrite, iSpace );
        izWrite += iSpace;//zWrite += iSpace;
        nWrite -= iSpace;
        p.endpoint.iOffset += iSpace;
      }

      return SQLITE_OK;
    }
Example no. 38
        /// <summary>
        /// This method is used to deserialize the items of the fragment knowledge entry from the byte array.
        /// </summary>
        /// <param name="byteArray">Specify the byte array.</param>
        /// <param name="currentIndex">Specify the start index from the byte array.</param>
        /// <param name="lengthOfItems">Specify the current length of items in the fragment knowledge entry.</param>
        protected override void DeserializeItemsFromByteArray(byte[] byteArray, ref int currentIndex, int lengthOfItems)
        {
            int index = currentIndex;
            this.ExtendedGUID = BasicObject.Parse<ExGuid>(byteArray, ref index);
            this.DataElementSize = BasicObject.Parse<Compact64bitInt>(byteArray, ref index);
            this.DataElementChunkReference = BasicObject.Parse<FileChunk>(byteArray, ref index);

            if (index - currentIndex != lengthOfItems)
            {
                throw new StreamObjectParseErrorException(currentIndex, "FragmentKnowledgeEntry", "Stream object over-parse error", null);
            }

            currentIndex = index;
        }
Example no. 39
 void test(FileChunk d)
 {
     var i = 0;
 }
Example no. 40
 string GetChunkFileName(FileChunk chunk)
 {
     return recodeBeforeAssembling ? chunk.EndChunkFileName : chunk.ChunkFilename;
 }
Example no. 41
        public List<FileChunk> GetChunksForFileBlock(int blockNumber, CancellationToken cancellationToken, 
            ConcurrentDictionary<long, FileHash> remoteHashes, FileStream mappedFile, long fileBlockChunkStartOffset, long chunkLength, ProgressReporter progressReporter, Config config)
        {
            var chunkBuffer = FillBuffer(mappedFile, fileBlockChunkStartOffset, config);
            var windowSize = config.BlockLength;

            //our position in the chunk buffer
            long chunkBufferReadOffset = 0;

            //our position in the actual file block.
            var chunkReadOffset = fileBlockChunkStartOffset;

            //lets us track the 'non matched' blocks.
            var nonMatchStartOffset = chunkBufferReadOffset;

            var windowWeakChecksum = new Adler32();

            var outList = new List<FileChunk>();
            while (chunkReadOffset < chunkLength)
            {
                if (cancellationToken.IsCancellationRequested)
                    return null;

                var buffer = new byte[windowSize];

                if (chunkBufferReadOffset + config.BlockLength >= chunkBuffer.Length)
                {
                    // Keep the refilled buffer; the return value was previously discarded.
                    chunkBuffer = FillBuffer(mappedFile, chunkReadOffset, config);
                    chunkBufferReadOffset = 0;
                }
                //correct for 'end of block' so the buffer array is the right length.
                if (chunkReadOffset + config.BlockLength > chunkLength)
                {
                    windowSize = checked((int)(chunkLength - chunkReadOffset));
                    buffer = new byte[windowSize];
                }

                Array.ConstrainedCopy(chunkBuffer, (int)chunkBufferReadOffset, buffer, 0, windowSize);

                if (chunkBufferReadOffset == 0)
                {
                    windowWeakChecksum.Update(buffer);
                }
                else
                {
                    windowWeakChecksum.Update(buffer, buffer.Length - 1, 1);
                }

                var weakCheckSum = windowWeakChecksum.Value;

                FileHash match;
                remoteHashes.TryGetValue(weakCheckSum, out match);
                if (match != null)
                {
                    var strongHash = CalculateStrongHash(buffer);
                    if (strongHash.SequenceEqual(match.StrongHash))
                    {
                        if (chunkBufferReadOffset > 0)
                        {
                            var nonMatchEndOffset = fileBlockChunkStartOffset - 1;
                            var nonMatchingChunk = new FileChunk
                                                       {
                                                           IsMatch = false,
                                                           BlockLength = nonMatchEndOffset - nonMatchStartOffset,
                                                           SourceOffset = nonMatchStartOffset + fileBlockChunkStartOffset
                                                       };
                            outList.Add(nonMatchingChunk);
                            nonMatchStartOffset = chunkBufferReadOffset + config.BlockLength;
                        }

                        var matchingChunk = new FileChunk
                                                {
                                                    IsMatch = true,
                                                    DestinationOffset = chunkReadOffset,
                                                    SourceOffset = match.Offset,
                                                    BlockLength = config.BlockLength
                                                };
                        outList.Add(matchingChunk);
                        windowWeakChecksum.Reset();
                    }
                }

                if (chunkBufferReadOffset % 100 == 0)
                {
                    long offset = chunkBufferReadOffset;
                    progressReporter.ReportProgress(() =>
                    {
                        if (DiffBlockScanProgressChanged != null)
                            // Multiply before dividing: offset / chunkLength truncates to 0 in integer arithmetic.
                            DiffBlockScanProgressChanged(blockNumber, (int)(offset * 100 / chunkLength));
                    });
                }
                chunkBufferReadOffset += 1;
                chunkReadOffset += 1;
            }
            if (chunkLength - nonMatchStartOffset > 1)
            {
                var nonMatchingChunk = new FileChunk { IsMatch = false, BlockLength = chunkLength - nonMatchStartOffset, SourceOffset = nonMatchStartOffset + chunkBufferReadOffset };
                outList.Add(nonMatchingChunk);
            }
            return outList;
        }
Example no. 42
 private void Open()
 {
     Opened = true;
     //Debug.Assert(SysEx.HASALIGNMENT8(this));
     //_memset(this, 0, sizeof(MemoryVFile));
     // clear
     First = null;
     _endpoint = new FilePoint();
     _readpoint = new FilePoint();
 }
Example no. 43
 /// <summary>
 /// Splits big file into some chunks by matching starting characters in each line
 /// </summary>
 /// <param name="inputFileName">Big file name</param>
 /// <param name="chars">Number of starting characters to split by</param>
 private void SplitFile(string inputFileName, int chars)
 {
     var files = new Dictionary<string, FileChunk>(Comparer);
     using (var sr = new StreamReader(inputFileName, Encoding))
     {
         while (sr.Peek() >= 0)
         {
             string entry = sr.ReadLine();
             //The length of the line is less than the current number of characters we split by.
             //In this case we add the line to the non-sorted file.
             if (entry.Length < chars)
             {
                 ChunkInfo nameInfo;
                 if (!chunks.TryGetValue(entry, out nameInfo))
                     chunks.Add(entry, nameInfo = new ChunkInfo());
                 nameInfo.AddSmallString(entry, Encoding);
             }
             //Otherwise we add the line to the file corresponding to the first 'chars' characters of the line
             else
             {
                 string start = entry.Substring(0, chars);
                 FileChunk sfi;
                 if (!files.TryGetValue(start, out sfi))
                 {
                     sfi = new FileChunk(Encoding);
                     files.Add(start, sfi);
                 }
                 sfi.Append(entry, Encoding);
             }
         }
     }
      //For each chunk we check whether its size is still greater than maxFileSize
     foreach (var file in files)
     {
         file.Value.Close();
         //If it is - split to smaller chunks
         if (file.Value.Size > maxFileSize)
         {
             SplitFile(file.Value.FileName, chars + 1);
             File.Delete(file.Value.FileName);
         }
         //Otherwise save it to the dictionary
         else
         {
             SortFile(file.Value.FileName, file.Value.FileName);
             ChunkInfo nameInfo;
             if (!chunks.TryGetValue(file.Key, out nameInfo))
                 chunks.Add(file.Key, nameInfo = new ChunkInfo());
             nameInfo.FileName = file.Value.FileName;
         }
     }
 }
Example no. 44
 public override RC Truncate(long size)
 {
     SysEx.UNUSED_PARAMETER(size);
     Debug.Assert(size == 0);
     var pChunk = pFirst;
      while (pChunk != null)
      {
          var pTmp = pChunk;
          pChunk = pChunk.pNext;
          // In the C original this loop freed each chunk (sqlite3_free); under the GC it only walks the list.
      }
     // clear
     pFirst = null;
     endpoint = new FilePoint();
     readpoint = new FilePoint();
     return RC.OK;
 }