Copy() public static method

public static Copy ( void* src, void* dst, int count ) : void
src void*
dst void*
count int
return void
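
A minimal usage sketch (hypothetical buffer names, not taken from the examples below). The argument order follows the signature above; note that the examples on this page come from several different libraries, and some of their Memory.Copy overloads take (dst, src, count) instead:

        unsafe
        {
            byte* src = stackalloc byte[16];
            byte* dst = stackalloc byte[16];

            for (int i = 0; i < 16; i++)
            {
                src[i] = (byte)i; // fill the source with sample data
            }

            // copy 16 bytes from src into dst
            Memory.Copy(src, dst, 16);
        }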
Example 1
            protected override void Copy(double[] source, int sourceIndex, int length, int stride)
            {
                fixed(double *pSrc = source)
                {
                    switch (stride)
                    {
                    case 1:
                        //Marshal.Copy(source, sourceIndex, new IntPtr(FPDst + Position), length);
                        Memory.Copy(FPDst + Position, pSrc + sourceIndex, (uint)length * sizeof(double));
                        break;

                    default:
                        double *src = pSrc + sourceIndex;
                        double *dst = FPDst + Position;

                        for (int i = 0; i < length; i++)
                        {
                            *dst = *(src++);
                            dst += stride;
                        }
                        break;
                    }
                }
            }
Example 2
        /// <summary>
        /// Reads a block of data from the buffer into the given destination.
        /// </summary>
        /// <param name="offset">the byte offset to start reading from.</param>
        /// <param name="length">the number of bytes to read.</param>
        /// <param name="dest">the destination pointer to copy the data into.</param>
        public override void ReadData(int offset, int length, IntPtr dest)
        {
            if (useShadowBuffer)
            {
                // lock the buffer for reading
                IntPtr src = shadowBuffer.Lock(offset, length, BufferLocking.ReadOnly);

                // copy that data in there
                Memory.Copy(src, dest, length);

                // unlock the buffer
                shadowBuffer.Unlock();
            }
            else
            {
                Gl.glBindBufferARB(Gl.GL_ARRAY_BUFFER_ARB, bufferID);

                Gl.glGetBufferSubDataARB(
                    Gl.GL_ARRAY_BUFFER_ARB,
                    offset,
                    length,
                    dest);
            }
        }
Example 3
            protected override void Copy(double[] destination, int destinationIndex, int length, int stride)
            {
                fixed(double *destinationPtr = destination)
                {
                    switch (stride)
                    {
                    case 1:
                        //Marshal.Copy(new IntPtr(FPData + Position), destination, destinationIndex, length);
                        Memory.Copy(destinationPtr + destinationIndex, FPData + Position, (uint)length * sizeof(double));
                        break;

                    default:
                        double *dst = destinationPtr + destinationIndex;
                        double *src = FPData + Position;

                        for (int i = 0; i < length; i++)
                        {
                            *(dst++) = *src;
                            src     += stride;
                        }
                        break;
                    }
                }
            }
Example 4
        public override unsafe void Write(Slice changeVector, Stream stream, byte[] tempBuffer, OutgoingReplicationStatsScope stats)
        {
            fixed(byte *pTemp = tempBuffer)
            {
                if (AssertChangeVectorSize() > tempBuffer.Length)
                {
                    ThrowTooManyChangeVectorEntries(this, Key.ToString());
                }

                var tempBufferPos = WriteCommon(changeVector, pTemp);

                *(int *)(pTemp + tempBufferPos) = Key.Size;
                tempBufferPos += sizeof(int);
                Memory.Copy(pTemp + tempBufferPos, Key.Content.Ptr, Key.Size);
                tempBufferPos += Key.Size;

                *(int *)(pTemp + tempBufferPos) = Segment.NumberOfBytes;
                tempBufferPos += sizeof(int);
                Memory.Copy(pTemp + tempBufferPos, Segment.Ptr, Segment.NumberOfBytes);
                tempBufferPos += Segment.NumberOfBytes;

                *(int *)(pTemp + tempBufferPos) = Collection.Size;
                tempBufferPos += sizeof(int);
                Memory.Copy(pTemp + tempBufferPos, Collection.Buffer, Collection.Size);
                tempBufferPos += Collection.Size;

                *(int *)(pTemp + tempBufferPos) = Name.Size;
                tempBufferPos += sizeof(int);
                Memory.Copy(pTemp + tempBufferPos, Name.Buffer, Name.Size);
                tempBufferPos += Name.Size;

                stream.Write(tempBuffer, 0, tempBufferPos);

                stats.RecordTimeSeriesOutput(Segment.NumberOfBytes);
            }
        }
Example 5
        public void Write(byte *buffer, int count)
        {
            Debug.Assert(count >= 0);               // count is a size
            Debug.Assert(buffer + count >= buffer); // overflow check

            if (count == 0)
            {
                return;
            }

            var head = _head;

            if (head.Allocation.SizeInBytes - head.Used > count)
            {
                //Unsafe.CopyBlock(head.Address + head.Used, buffer, (uint)count);
                Memory.Copy(head.Address + head.Used, buffer, count);
                head.AccumulatedSizeInBytes += count;
                head.Used += count;
            }
            else
            {
                throw new NotSupportedException();
            }
        }
Example 6
        private bool TryMergePages(Page parentPage, Page left, Page right)
        {
            TemporaryPage tmp;

            using (_tx.Environment.GetTemporaryPage(_tx, out tmp))
            {
                var mergedPage = tmp.GetTempPage(left.KeysPrefixed);
                Memory.Copy(mergedPage.Base, left.Base, left.PageSize);

                var previousSearchPosition = right.LastSearchPosition;

                for (int i = 0; i < right.NumberOfEntries; i++)
                {
                    right.LastSearchPosition = i;
                    var key  = GetActualKey(right, right.LastSearchPositionOrLastEntry);
                    var node = right.GetNode(i);

                    var prefixedKey = mergedPage.PrepareKeyToInsert(key, mergedPage.NumberOfEntries);

                    if (mergedPage.HasSpaceFor(_tx, SizeOf.NodeEntryWithAnotherKey(node, prefixedKey) + Constants.NodeOffsetSize + SizeOf.NewPrefix(prefixedKey)) == false)
                    {
                        right.LastSearchPosition = previousSearchPosition; //previous position --> prevent mutation of parameter
                        return(false);
                    }

                    mergedPage.CopyNodeDataToEndOfPage(node, prefixedKey);
                }

                Memory.Copy(left.Base, mergedPage.Base, left.PageSize);
            }

            parentPage.RemoveNode(parentPage.LastSearchPositionOrLastEntry); // unlink the right sibling
            _tree.FreePage(right);

            return(true);
        }
Example 7
        public void Write(byte *buffer, int count)
        {
            Debug.Assert(count >= 0);               // count is a size
            Debug.Assert(buffer + count >= buffer); // overflow check
            ThrowOnDisposed();

            if (count == 0)
            {
                return;
            }

            var head = _head;

            if (head.Allocation.SizeInBytes - head.Used > count)
            {
                Memory.Copy(head.Address + head.Used, buffer, (uint)count);
                head.AccumulatedSizeInBytes += count;
                head.Used += count;
            }
            else
            {
                WriteUnlikely(buffer, count);
            }
        }
Example 8
        /// <summary>
        /// Makes a shadow copy of an index indirect block and updates a remote address.
        /// </summary>
        /// <param name="sourceBlockAddress">the address of the source.</param>
        /// <param name="destinationBlockAddress">the address of the destination. This can be the same as the source.</param>
        /// <param name="indexValue">the index value that goes in the footer of the file.</param>
        /// <param name="blockType">Gets the expected block type</param>
        /// <param name="remoteAddressOffset">the offset of the remote address that needs to be updated.</param>
        /// <param name="remoteBlockAddress">the value of the remote address.</param>
        private void ReadThenWriteIndexIndirectBlock(uint sourceBlockAddress, uint destinationBlockAddress, uint indexValue, BlockType blockType, int remoteAddressOffset, uint remoteBlockAddress)
        {
            DiskIoSession bufferSource = m_ioSessions.SourceIndex;

            if (sourceBlockAddress == destinationBlockAddress)
            {
                if (*(int *)(bufferSource.Pointer + (remoteAddressOffset << 2)) != remoteBlockAddress)
                {
                    bufferSource.WriteToExistingBlock(destinationBlockAddress, blockType, indexValue);

                    WriteIndexIndirectBlock(bufferSource.Pointer, remoteAddressOffset, remoteBlockAddress);
                }
            }
            else
            {
                bufferSource.ReadOld(sourceBlockAddress, blockType, indexValue);

                DiskIoSession destination = m_ioSessions.DestinationIndex;
                destination.WriteToNewBlock(destinationBlockAddress, blockType, indexValue);
                Memory.Copy(bufferSource.Pointer, destination.Pointer, destination.Length);
                WriteIndexIndirectBlock(destination.Pointer, remoteAddressOffset, remoteBlockAddress);
                m_ioSessions.SwapIndex();
            }
        }
Example 9
        public void CanComputeSmallDifference_AndThenApplyit()
        {
            var fst = new byte[4096];
            var sec = new byte[4096];
            var trd = new byte[4096];

            new Random().NextBytes(fst);
            Buffer.BlockCopy(fst, 0, sec, 0, fst.Length);

            sec[12]++;
            sec[433]++;

            fixed(byte *one = fst)
            fixed(byte *two = sec)
            fixed(byte *tri = trd)
            fixed(byte *tmp = new byte[4096])
            {
                var diffPages = new DiffPages
                {
                    Output = tmp,
                };

                diffPages.ComputeDiff(one, two, 4096);

                Memory.Copy(tri, one, 4096);
                new DiffApplier
                {
                    Destination = tri,
                    Diff        = tmp,
                    Size        = 4096,
                    DiffSize    = diffPages.OutputSize
                }.Apply();

                Assert.Equal(0, Memory.Compare(tri, two, 4096));
            }
        }
Example 10
        public void ChangePostponeDate(string id, DateTime? postponeUntil)
        {
            using (_contextPool.AllocateOperationContext(out TransactionOperationContext context))
                using (var tx = context.OpenWriteTransaction())
                {
                    var item = Get(id, context, tx);

                    if (item == null)
                    {
                        return;
                    }

                    var itemCopy = context.GetMemory(item.Json.Size);

                    Memory.Copy(itemCopy.Address, item.Json.BasePointer, item.Json.Size);

                    Store(context.GetLazyString(id), item.CreatedAt, postponeUntil,
                          //we create a copy because we can't update directly from mutated memory
                          new BlittableJsonReaderObject(itemCopy.Address, item.Json.Size, context)
                          , tx);

                    tx.Commit();
                }
        }
Example 11
        private unsafe LazyStringValue CreateLazyStringValueFromParserState(JsonParserState state)
        {
            int escapePositionsCount = state.EscapePositions.Count;

            var maxSizeOfEscapePos = escapePositionsCount * 5 // max size of var int
                                     + JsonParserState.VariableSizeIntSize(escapePositionsCount);

            var mem = _ctx.GetMemory(maxSizeOfEscapePos + state.StringSize);

            _allocations.Add(mem);
            Memory.Copy(mem.Address, state.StringBuffer, state.StringSize);
            var lazyStringValueFromParserState = _ctx.AllocateStringValue(null, mem.Address, state.StringSize);

            if (escapePositionsCount > 0)
            {
                lazyStringValueFromParserState.EscapePositions = state.EscapePositions.ToArray();
            }
            else
            {
                lazyStringValueFromParserState.EscapePositions = Array.Empty <int>();
            }

            return(lazyStringValueFromParserState);
        }
Example 12
        public unsafe ReadWriteCompressedStream(Stream inner, JsonOperationContext.MemoryBuffer alreadyOnBuffer)
        {
            Stream innerInput = inner;
            int    valid      = alreadyOnBuffer.Valid - alreadyOnBuffer.Used;

            if (valid > 0)
            {
                byte[] buffer = ArrayPool <byte> .Shared.Rent(valid);
                fixed(byte *pBuffer = buffer)
                {
                    Memory.Copy(pBuffer, alreadyOnBuffer.Address + alreadyOnBuffer.Used, valid);
                }

                innerInput = new ConcatStream(new ConcatStream.RentedBuffer {
                    Buffer = buffer, Offset = 0, Count = valid
                }, inner);
                alreadyOnBuffer.Valid = alreadyOnBuffer.Used = 0; // consume all the data from the buffer
            }

            _inner   = innerInput ?? throw new ArgumentNullException(nameof(inner));
            _input   = ZstdStream.Decompress(inner);
            _output  = ZstdStream.Compress(inner);
            _dispose = new DisposeOnce <SingleAttempt>(DisposeInternal);
        }
Example 13
        /// <summary>
        /// Writes data to the buffer, updating the shadow buffer if one is in use.
        /// </summary>
        /// <param name="offset"> the byte offset to start writing at. </param>
        /// <param name="length"> the number of bytes to write. </param>
        /// <param name="src"> the source buffer to copy the data from. </param>
        /// <param name="discardWholeBuffer"> whether the whole buffer may be discarded before writing. </param>
        public override void WriteData(int offset, int length, BufferBase src, bool discardWholeBuffer)
        {
            OpenGL.BindBuffer(All.ElementArrayBuffer, this._bufferId);
            GLES2Config.GlCheckError(this);
            // Update the shadow buffer
            if (useShadowBuffer)
            {
                var destData = shadowBuffer.Lock(offset, length, discardWholeBuffer ? BufferLocking.Discard : BufferLocking.Normal);
                Memory.Copy(src, destData, length);
                shadowBuffer.Unlock();
            }

            var srcPtr = src.Ptr;

            if (offset == 0 && length == sizeInBytes)
            {
                OpenGL.BufferData(All.ElementArrayBuffer, new IntPtr(sizeInBytes), ref srcPtr, GLES2HardwareBufferManager.GetGLUsage(usage));
                GLES2Config.GlCheckError(this);
            }
            else
            {
                if (discardWholeBuffer)
                {
                    OpenGL.BufferData(All.ElementArrayBuffer, new IntPtr(sizeInBytes), IntPtr.Zero, GLES2HardwareBufferManager.GetGLUsage(usage));
                    GLES2Config.GlCheckError(this);
                }
                // Now update the real buffer
                OpenGL.BufferSubData(All.ElementArrayBuffer, new IntPtr(offset), new IntPtr(length), ref srcPtr);
                GLES2Config.GlCheckError(this);
            }

            if (src.Ptr != srcPtr)
            {
                LogManager.Instance.Write("[GLES2] HardwareIndexBuffer.WriteData - buffer pointer modified by GL.BufferData.");
            }
        }
Example 14
        public override Codec.DecodeResult Decode(Stream input)
        {
            // Buffer stream into memory (TODO: override IO functions instead?)
            var data = new byte[(int)input.Length];

            input.Read(data, 0, data.Length);
            FI.FIMEMORY          fiMem;
            FI.FREE_IMAGE_FORMAT ff;
            FI.FIBITMAP          fiBitmap;
            using (var datPtr = BufferBase.Wrap(data))
            {
                fiMem = FI.FreeImage.OpenMemory(datPtr.Pin(), (uint)data.Length);
                datPtr.UnPin();
                ff       = (FI.FREE_IMAGE_FORMAT) this._freeImageType;
                fiBitmap = FI.FreeImage.LoadFromMemory(ff, fiMem,
                                                       FI.FREE_IMAGE_LOAD_FLAGS.DEFAULT);
            }

            if (fiBitmap.IsNull)
            {
                throw new AxiomException("Error decoding image");
            }

            var imgData = new ImageData();

            imgData.depth      = 1; // only 2D formats handled by this codec
            imgData.width      = (int)FI.FreeImage.GetWidth(fiBitmap);
            imgData.height     = (int)FI.FreeImage.GetHeight(fiBitmap);
            imgData.numMipMaps = 0; // no mipmaps in non-DDS

            // Must derive format first, this may perform conversions
            var imageType = FI.FreeImage.GetImageType(fiBitmap);
            var colorType = FI.FreeImage.GetColorType(fiBitmap);
            var bpp       = (int)FI.FreeImage.GetBPP(fiBitmap);

            switch (imageType)
            {
            case FI.FREE_IMAGE_TYPE.FIT_UNKNOWN:
            case FI.FREE_IMAGE_TYPE.FIT_COMPLEX:
            case FI.FREE_IMAGE_TYPE.FIT_UINT32:
            case FI.FREE_IMAGE_TYPE.FIT_INT32:
            case FI.FREE_IMAGE_TYPE.FIT_DOUBLE:
            default:
                throw new AxiomException("Unknown or unsupported image format");

            case FI.FREE_IMAGE_TYPE.FIT_BITMAP:
                // Standard image type
                // Perform any colour conversions for greyscale
                if (colorType == FI.FREE_IMAGE_COLOR_TYPE.FIC_MINISWHITE || colorType == FI.FREE_IMAGE_COLOR_TYPE.FIC_MINISBLACK)
                {
                    var newBitmap = FI.FreeImage.ConvertToGreyscale(fiBitmap);
                    // free old bitmap and replace
                    FI.FreeImage.Unload(fiBitmap);
                    fiBitmap = newBitmap;
                    // get new formats
                    bpp       = (int)FI.FreeImage.GetBPP(fiBitmap);
                    colorType = FI.FreeImage.GetColorType(fiBitmap);
                }
                // Perform any colour conversions for RGB
                else if (bpp < 8 || colorType == FI.FREE_IMAGE_COLOR_TYPE.FIC_PALETTE ||
                         colorType == FI.FREE_IMAGE_COLOR_TYPE.FIC_CMYK)
                {
                    var newBitmap = FI.FreeImage.ConvertTo24Bits(fiBitmap);
                    // free old bitmap and replace
                    FI.FreeImage.Unload(fiBitmap);
                    fiBitmap = newBitmap;
                    // get new formats
                    bpp       = (int)FI.FreeImage.GetBPP(fiBitmap);
                    colorType = FI.FreeImage.GetColorType(fiBitmap);
                }

                // by this stage, 8-bit is greyscale, 16/24/32 bit are RGB[A]
                switch (bpp)
                {
                case 8:
                    imgData.format = PixelFormat.L8;
                    break;

                case 16:
                    // Determine 555 or 565 from green mask
                    // cannot be 16-bit greyscale since that's FIT_UINT16
                    if (FI.FreeImage.GetGreenMask(fiBitmap) == FI.FreeImage.FI16_565_GREEN_MASK)
                    {
                        imgData.format = PixelFormat.R5G6B5;
                    }
                    else
                    {
                        // FreeImage doesn't support 4444 format so must be 1555
                        imgData.format = PixelFormat.A1R5G5B5;
                    }
                    break;

                case 24:
                    // FreeImage differs per platform
                    //     PixelFormat.BYTE_BGR[A] for little endian (== PixelFormat.ARGB native)
                    //     PixelFormat.BYTE_RGB[A] for big endian (== PixelFormat.RGBA native)
                    if (FI.FreeImage.IsLittleEndian())
                    {
                        imgData.format = PixelFormat.BYTE_BGR;
                    }
                    else
                    {
                        imgData.format = PixelFormat.BYTE_RGB;
                    }
                    break;

                case 32:
                    if (FI.FreeImage.IsLittleEndian())
                    {
                        imgData.format = PixelFormat.BYTE_BGRA;
                    }
                    else
                    {
                        imgData.format = PixelFormat.BYTE_RGBA;
                    }
                    break;
                }
                break;

            case FI.FREE_IMAGE_TYPE.FIT_UINT16:
            case FI.FREE_IMAGE_TYPE.FIT_INT16:
                // 16-bit greyscale
                imgData.format = PixelFormat.L16;
                break;

            case FI.FREE_IMAGE_TYPE.FIT_FLOAT:
                // Single-component floating point data
                imgData.format = PixelFormat.FLOAT32_R;
                break;

            case FI.FREE_IMAGE_TYPE.FIT_RGB16:
                imgData.format = PixelFormat.SHORT_RGB;
                break;

            case FI.FREE_IMAGE_TYPE.FIT_RGBA16:
                imgData.format = PixelFormat.SHORT_RGBA;
                break;

            case FI.FREE_IMAGE_TYPE.FIT_RGBF:
                imgData.format = PixelFormat.FLOAT32_RGB;
                break;

            case FI.FREE_IMAGE_TYPE.FIT_RGBAF:
                imgData.format = PixelFormat.FLOAT32_RGBA;
                break;
            }

            var srcPitch = (int)FI.FreeImage.GetPitch(fiBitmap);

            // Final data - invert image and trim pitch at the same time
            var dstPitch = imgData.width * PixelUtil.GetNumElemBytes(imgData.format);

            imgData.size = dstPitch * imgData.height;
            // Bind output buffer
            var outputData = new byte[imgData.size];

            using (var srcData = BufferBase.Wrap(FI.FreeImage.GetBits(fiBitmap), imgData.height * srcPitch))
            {
                var pDst = BufferBase.Wrap(outputData);

                for (var y = 0; y < imgData.height; ++y)
                {
                    using (var pSrc = srcData + (imgData.height - y - 1) * srcPitch)
                    {
                        Memory.Copy(pSrc, pDst, dstPitch);
                        pDst += dstPitch;
                    }
                }

                pDst.Dispose();
            }

            FI.FreeImage.Unload(fiBitmap);
            FI.FreeImage.CloseMemory(fiMem);

            return(new Codec.DecodeResult(new MemoryStream(outputData), imgData));
        }
Example 15
        private FI.FIBITMAP _encode(Stream input, CodecData codecData)
        {
            var ret = new FI.FIBITMAP();

            ret.SetNull();
            var imgData = codecData as ImageData;

            if (imgData != null)
            {
                var data = new byte[(int)input.Length];
                input.Read(data, 0, data.Length);
                var dataPtr = BufferBase.Wrap(data);
                var src     = new PixelBox(imgData.width, imgData.height, imgData.depth, imgData.format, dataPtr);

                // The required format, which will adjust to the format
                // actually supported by FreeImage.
                var requiredFormat = imgData.format;

                // determine the settings
                var imageType         = FI.FREE_IMAGE_TYPE.FIT_UNKNOWN;
                var determiningFormat = imgData.format;

                switch (determiningFormat)
                {
                case PixelFormat.R5G6B5:
                case PixelFormat.B5G6R5:
                case PixelFormat.R8G8B8:
                case PixelFormat.B8G8R8:
                case PixelFormat.A8R8G8B8:
                case PixelFormat.X8R8G8B8:
                case PixelFormat.A8B8G8R8:
                case PixelFormat.X8B8G8R8:
                case PixelFormat.B8G8R8A8:
                case PixelFormat.R8G8B8A8:
                case PixelFormat.A4L4:
                case PixelFormat.BYTE_LA:
                case PixelFormat.R3G3B2:
                case PixelFormat.A4R4G4B4:
                case PixelFormat.A1R5G5B5:
                case PixelFormat.A2R10G10B10:
                case PixelFormat.A2B10G10R10:
                    // I'd like to be able to use r/g/b masks to get FreeImage to load the data
                    // in its existing format, but that doesn't work; FreeImage needs to have
                    // data in RGB[A] (big endian) or BGR[A] (little endian), always.
                    if (PixelUtil.HasAlpha(determiningFormat))
                    {
                        if (FI.FreeImageEngine.IsLittleEndian)
                        {
                            requiredFormat = PixelFormat.BYTE_BGRA;
                        }
                        else
                        {
                            requiredFormat = PixelFormat.BYTE_RGBA;
                        }
                    }
                    else
                    {
                        if (FI.FreeImageEngine.IsLittleEndian)
                        {
                            requiredFormat = PixelFormat.BYTE_BGR;
                        }
                        else
                        {
                            requiredFormat = PixelFormat.BYTE_RGB;
                        }
                    }
                    imageType = FI.FREE_IMAGE_TYPE.FIT_BITMAP;
                    break;

                case PixelFormat.L8:
                case PixelFormat.A8:
                    imageType = FI.FREE_IMAGE_TYPE.FIT_BITMAP;
                    break;

                case PixelFormat.L16:
                    imageType = FI.FREE_IMAGE_TYPE.FIT_UINT16;
                    break;

                case PixelFormat.SHORT_GR:
                    requiredFormat = PixelFormat.SHORT_RGB;
                    break;

                case PixelFormat.SHORT_RGB:
                    imageType = FI.FREE_IMAGE_TYPE.FIT_RGB16;
                    break;

                case PixelFormat.SHORT_RGBA:
                    imageType = FI.FREE_IMAGE_TYPE.FIT_RGBA16;
                    break;

                case PixelFormat.FLOAT16_R:
                    requiredFormat = PixelFormat.FLOAT32_R;
                    break;

                case PixelFormat.FLOAT32_R:
                    imageType = FI.FREE_IMAGE_TYPE.FIT_FLOAT;
                    break;

                case PixelFormat.FLOAT16_GR:
                case PixelFormat.FLOAT16_RGB:
                case PixelFormat.FLOAT32_GR:
                    requiredFormat = PixelFormat.FLOAT32_RGB;
                    break;

                case PixelFormat.FLOAT32_RGB:
                    imageType = FI.FREE_IMAGE_TYPE.FIT_RGBF;
                    break;

                case PixelFormat.FLOAT16_RGBA:
                    requiredFormat = PixelFormat.FLOAT32_RGBA;
                    break;

                case PixelFormat.FLOAT32_RGBA:
                    imageType = FI.FREE_IMAGE_TYPE.FIT_RGBAF;
                    break;

                default:
                    throw new AxiomException("Not Supported image format :{0}", determiningFormat.ToString());
                } //end switch

                // Check support for this image type & bit depth
                if (!FI.FreeImage.FIFSupportsExportType((FI.FREE_IMAGE_FORMAT) this._freeImageType, imageType) ||
                    !FI.FreeImage.FIFSupportsExportBPP((FI.FREE_IMAGE_FORMAT) this._freeImageType,
                                                       PixelUtil.GetNumElemBits(requiredFormat)))
                {
                    // Ok, need to allocate a fallback
                    // Only deal with RGBA -> RGB for now
                    switch (requiredFormat)
                    {
                    case PixelFormat.BYTE_RGBA:
                        requiredFormat = PixelFormat.BYTE_RGB;
                        break;

                    case PixelFormat.BYTE_BGRA:
                        requiredFormat = PixelFormat.BYTE_BGR;
                        break;

                    default:
                        break;
                    }
                }

                var conversionRequired = false;
                input.Position = 0;
                var srcData = new byte[(int)input.Length];
                input.Read(srcData, 0, srcData.Length);
                var srcDataPtr = BufferBase.Wrap(srcData);

                // Check BPP
                var bpp = PixelUtil.GetNumElemBits(requiredFormat);
                if (!FI.FreeImage.FIFSupportsExportBPP((FI.FREE_IMAGE_FORMAT) this._freeImageType, bpp))
                {
                    if (bpp == 32 && PixelUtil.HasAlpha(imgData.format) &&
                        FI.FreeImage.FIFSupportsExportBPP((FI.FREE_IMAGE_FORMAT) this._freeImageType, 24))
                    {
                        // drop to 24 bit (lose alpha)
                        if (FI.FreeImage.IsLittleEndian())
                        {
                            requiredFormat = PixelFormat.BYTE_BGR;
                        }
                        else
                        {
                            requiredFormat = PixelFormat.BYTE_RGB;
                        }

                        bpp = 24;
                    }
                    else if (bpp == 128 && PixelUtil.HasAlpha(imgData.format) &&
                             FI.FreeImage.FIFSupportsExportBPP((FI.FREE_IMAGE_FORMAT) this._freeImageType, 96))
                    {
                        // drop to 96-bit floating point
                        requiredFormat = PixelFormat.FLOAT32_RGB;
                    }
                }

                var convBox = new PixelBox(imgData.width, imgData.height, 1, requiredFormat);
                if (requiredFormat != imgData.format)
                {
                    conversionRequired = true;
                    // Allocate memory
                    var convData = new byte[convBox.ConsecutiveSize];
                    convBox.Data = BufferBase.Wrap(convData);
                    // perform conversion and reassign source
                    var newSrc = new PixelBox(imgData.width, imgData.height, 1, imgData.format, dataPtr);
                    PixelConverter.BulkPixelConversion(newSrc, convBox);
                    srcDataPtr = convBox.Data;
                }

                ret = FI.FreeImage.AllocateT(imageType, imgData.width, imgData.height, bpp);
                if (ret.IsNull)
                {
                    if (conversionRequired)
                    {
                        srcDataPtr.SafeDispose();
                        convBox = null;
                    }

                    throw new AxiomException("FreeImage.AllocateT failed - possibly out of memory. ");
                }

                if (requiredFormat == PixelFormat.L8 || requiredFormat == PixelFormat.A8)
                {
                    // Must explicitly tell FreeImage that this is greyscale by setting
                    // a "grey" palette (otherwise it will save as a normal RGB
                    // palettized image).
                    var tmp = FI.FreeImage.ConvertToGreyscale(ret);
                    FI.FreeImage.Unload(ret);
                    ret = tmp;
                }

                var dstPitch = (int)FI.FreeImage.GetPitch(ret);
                var srcPitch = imgData.width * PixelUtil.GetNumElemBytes(requiredFormat);

                // Copy data, invert scanlines and respect FreeImage pitch
                var pSrc = srcDataPtr;
                using (var pDest = BufferBase.Wrap(FI.FreeImage.GetBits(ret), imgData.height * srcPitch))
                {
                    var byteDstData = pDest;
                    for (var y = 0; y < imgData.height; ++y)
                    {
                        // copy one scanline at a time, inverting the image vertically
                        // (FreeImage stores scanlines bottom-up)
                        using (var byteSrcData = pSrc + (imgData.height - y - 1) * srcPitch)
                        {
                            Memory.Copy(byteSrcData, byteDstData, srcPitch);
                        }
                        byteDstData += dstPitch;
                    }
                }

                if (conversionRequired)
                {
                    // delete temporary conversion area
                    srcDataPtr.SafeDispose();
                    convBox = null;
                }
            }
            return(ret);
        }
Example 16
        public bool Initialize()
        {
            _locker.EnterWriteLock();
            try
            {
                if (_theHeader == null)
                {
                    throw new ObjectDisposedException("Cannot access the header after it was disposed");
                }

                var headers    = stackalloc FileHeader[2];
                var f1         = &headers[0];
                var f2         = &headers[1];
                var hasHeader1 = _env.Options.ReadHeader(HeaderFileNames[0], f1);
                var hasHeader2 = _env.Options.ReadHeader(HeaderFileNames[1], f2);
                if (hasHeader1 == false && hasHeader2 == false)
                {
                    // new
                    FillInEmptyHeader(f1);
                    FillInEmptyHeader(f2);
                    _env.Options.WriteHeader(HeaderFileNames[0], f1);
                    _env.Options.WriteHeader(HeaderFileNames[1], f2);

                    Memory.Copy((byte *)_theHeader, (byte *)f1, sizeof(FileHeader));
                    return(true); // new
                }

                if (f1->MagicMarker != Constants.MagicMarker && f2->MagicMarker != Constants.MagicMarker)
                {
                    throw new InvalidDataException("None of the header files start with the magic marker, probably not db files");
                }

                // if one of the files is corrupted, but the other isn't, restore to the valid file
                if (f1->MagicMarker != Constants.MagicMarker)
                {
                    *f1 = *f2;
                }
                if (f2->MagicMarker != Constants.MagicMarker)
                {
                    *f2 = *f1;
                }

                if (f1->Version != Constants.CurrentVersion)
                {
                    throw new InvalidDataException("This is a db file for version " + f1->Version + ", which is not compatible with the current version " + Constants.CurrentVersion + Environment.NewLine +
                                                   "Error at " + _env.Options.BasePath);
                }

                if (f1->TransactionId < 0)
                {
                    throw new InvalidDataException("The transaction number cannot be negative");
                }


                if (f1->HeaderRevision > f2->HeaderRevision)
                {
                    Memory.Copy((byte *)_theHeader, (byte *)f1, sizeof(FileHeader));
                }
                else
                {
                    Memory.Copy((byte *)_theHeader, (byte *)f2, sizeof(FileHeader));
                }
                _revision = _theHeader->HeaderRevision;
                return(false);
            }
            finally
            {
                _locker.ExitWriteLock();
            }
        }
Example 17
        private RawDataSmallPageHeader *DefragPage(RawDataSmallPageHeader *pageHeader)
        {
            pageHeader = ModifyPage(pageHeader);

            if (pageHeader->NumberOfEntries == 0)
            {
                pageHeader->NextAllocation = (ushort)sizeof(RawDataSmallPageHeader);
                Memory.Set((byte *)pageHeader + pageHeader->NextAllocation, 0,
                           Constants.Storage.PageSize - pageHeader->NextAllocation);

                return(pageHeader);
            }


            TemporaryPage tmp;

            using (_tx.Environment.GetTemporaryPage(_tx, out tmp))
            {
                var maxUsedPos = pageHeader->NextAllocation;
                Memory.Copy(tmp.TempPagePointer, (byte *)pageHeader, Constants.Storage.PageSize);

                pageHeader->NextAllocation = (ushort)sizeof(RawDataSmallPageHeader);
                Memory.Set((byte *)pageHeader + pageHeader->NextAllocation, 0,
                           Constants.Storage.PageSize - pageHeader->NextAllocation);

                pageHeader->NumberOfEntries = 0;
                var pos = pageHeader->NextAllocation;
                while (pos < maxUsedPos)
                {
                    var oldSize = (RawDataEntrySizes *)(tmp.TempPagePointer + pos);

                    if (oldSize->AllocatedSize <= 0)
                    {
                        VoronUnrecoverableErrorException.Raise(_tx, $"Allocated size cannot be zero or negative, but was {oldSize->AllocatedSize} in page {pageHeader->PageNumber}");
                    }

                    if (oldSize->UsedSize < 0)
                    {
                        pos += (ushort)(oldSize->AllocatedSize + sizeof(RawDataEntrySizes));
                        continue; // this was freed
                    }
                    var prevId = (pageHeader->PageNumber) * Constants.Storage.PageSize + pos;
                    var newId  = (pageHeader->PageNumber) * Constants.Storage.PageSize + pageHeader->NextAllocation;
                    if (prevId != newId)
                    {
                        OnDataMoved(prevId, newId, tmp.TempPagePointer + pos + sizeof(RawDataEntrySizes), oldSize->UsedSize);
                    }

                    var newSize = (RawDataEntrySizes *)(((byte *)pageHeader) + pageHeader->NextAllocation);
                    newSize->AllocatedSize      = oldSize->AllocatedSize;
                    newSize->UsedSize           = oldSize->UsedSize;
                    pageHeader->NextAllocation += (ushort)sizeof(RawDataEntrySizes);
                    pageHeader->NumberOfEntries++;
                    Memory.Copy(((byte *)pageHeader) + pageHeader->NextAllocation, tmp.TempPagePointer + pos + sizeof(RawDataEntrySizes),
                                oldSize->UsedSize);

                    pageHeader->NextAllocation += (ushort)oldSize->AllocatedSize;
                    pos += (ushort)(oldSize->AllocatedSize + sizeof(RawDataEntrySizes));
                }
            }
            return(pageHeader);
        }
Example 18
 public unsafe override void Apply(ColorBgra *dst, ColorBgra *src, int length)
 {
     Memory.Copy(dst, src, (ulong)length * (ulong)ColorBgra.SizeOf);
 }
Example 19
        private void MaybeTrainCompressionDictionary(Table table, FixedSizeTree etagsTree)
        {
            // the idea is that we'll get better results by including the most recently modified documents,
            // by iterating over the etag index, which is guaranteed to be always increasing
            var dataIds = ArrayPool <long> .Shared.Rent(256);

            var sizes = ArrayPool <UIntPtr> .Shared.Rent(256);

            try
            {
                int used         = 0;
                var totalSize    = 0;
                int totalSkipped = 0;

                using (var it = etagsTree.Iterate())
                {
                    if (it.SeekToLast() == false)
                    {
                        return; // empty table, nothing to train on
                    }
                    do
                    {
                        long id = it.CreateReaderForCurrent().ReadLittleEndianInt64();
                        table.DirectRead(id, out var size);
                        if (size > 32 * 1024)
                        {
                            if (totalSkipped++ > 16 * 1024)
                            {
                                return;  // we are scanning too much, no need to try this hard
                            }
                            // we don't want to include documents that are too big; they will compress
                            // well on their own, and are likely *too* unique to add meaningfully to the
                            // dictionary
                            continue;
                        }

                        sizes[used]     = (UIntPtr)size;
                        dataIds[used++] = id;
                        totalSize      += size;
                    } while (used < 256 && it.MovePrev() && totalSize < 1024 * 1024);
                }

                if (used < 16)
                {
                    return; // too few samples to measure
                }
                var tx = table._tx;
                using (tx.Allocator.Allocate(totalSize, out var buffer))
                {
                    var cur = buffer.Ptr;
                    for (int i = 0; i < used; i++)
                    {
                        var ptr = table.DirectRead(dataIds[i], out var size);
                        Memory.Copy(cur, ptr, size);
                        cur += size;
                    }

                    using (tx.Allocator.Allocate(
                               // the dictionary
                               Constants.Storage.PageSize - PageHeader.SizeOf - sizeof(CompressionDictionaryInfo)
                               , out var dictionaryBuffer))
                    {
                        Span <byte> dictionaryBufferSpan = dictionaryBuffer.ToSpan();
                        ZstdLib.Train(new ReadOnlySpan <byte>(buffer.Ptr, totalSize),
                                      new ReadOnlySpan <UIntPtr>(sizes, 0, used),
                                      ref dictionaryBufferSpan);

                        var dictionariesTree = tx.CreateTree(TableSchema.CompressionDictionariesSlice);

                        var newId = (int)(dictionariesTree.State.NumberOfEntries + 1);

                        using var compressionDictionary = new ZstdLib.CompressionDictionary(newId, dictionaryBuffer.Ptr, dictionaryBufferSpan.Length, 3);

                        if (ShouldReplaceDictionary(tx, compressionDictionary) == false)
                        {
                            return;
                        }

                        table.CurrentCompressionDictionaryId           = newId;
                        compressionDictionary.ExpectedCompressionRatio = GetCompressionRatio(CompressedBuffer.Length, RawBuffer.Length);

                        var rev = Bits.SwapBytes(newId);
                        using (Slice.External(tx.Allocator, (byte *)&rev, sizeof(int), out var slice))
                            using (dictionariesTree.DirectAdd(slice, sizeof(CompressionDictionaryInfo) + dictionaryBufferSpan.Length, out var dest))
                            {
                                *((CompressionDictionaryInfo *)dest) =
                                    new CompressionDictionaryInfo {
                                    ExpectedCompressionRatio = compressionDictionary.ExpectedCompressionRatio
                                };
                                Memory.Copy(dest + sizeof(CompressionDictionaryInfo), dictionaryBuffer.Ptr, dictionaryBufferSpan.Length);
                            }

                        tx.LowLevelTransaction.OnDispose += RecreateRecoveryDictionaries;
                    }
                }
            }
            finally
            {
                ArrayPool <long> .Shared.Return(dataIds);

                ArrayPool <UIntPtr> .Shared.Return(sizes);
            }
        }
Example 20
        private unsafe bool InstallSnapshot(TransactionOperationContext context)
        {
            var txw    = context.Transaction.InnerTransaction;
            var sp     = Stopwatch.StartNew();
            var reader = _connection.CreateReader();

            while (true)
            {
                var type = reader.ReadInt32();
                if (type == -1)
                {
                    return(false);
                }

                int  size;
                long entries;
                switch ((RootObjectType)type)
                {
                case RootObjectType.None:
                    return(true);

                case RootObjectType.VariableSizeTree:

                    size = reader.ReadInt32();
                    reader.ReadExactly(size);
                    Slice treeName;    // will be freed on context close
                    Slice.From(context.Allocator, reader.Buffer, 0, size, ByteStringType.Immutable, out treeName);
                    txw.DeleteTree(treeName);
                    var tree = txw.CreateTree(treeName);

                    entries = reader.ReadInt64();
                    for (long i = 0; i < entries; i++)
                    {
                        MaybeNotifyLeaderThatWeAreStillAlive(context, sp);

                        size = reader.ReadInt32();
                        reader.ReadExactly(size);
                        using (Slice.From(context.Allocator, reader.Buffer, 0, size, ByteStringType.Immutable, out Slice valKey))
                        {
                            size = reader.ReadInt32();
                            reader.ReadExactly(size);

                            using (tree.DirectAdd(valKey, size, out byte *ptr))
                            {
                                fixed(byte *pBuffer = reader.Buffer)
                                {
                                    Memory.Copy(ptr, pBuffer, size);
                                }
                            }
                        }
                    }


                    break;

                case RootObjectType.Table:

                    size = reader.ReadInt32();
                    reader.ReadExactly(size);
                    Slice tableName;    // will be freed on context close
                    Slice.From(context.Allocator, reader.Buffer, 0, size, ByteStringType.Immutable,
                               out tableName);
                    var tableTree = txw.ReadTree(tableName, RootObjectType.Table);

                    // Get the table schema
                    var schemaSize = tableTree.GetDataSize(TableSchema.SchemasSlice);
                    var schemaPtr  = tableTree.DirectRead(TableSchema.SchemasSlice);
                    if (schemaPtr == null)
                    {
                        throw new InvalidOperationException(
                                  "When trying to install snapshot, found missing table " + tableName);
                    }

                    var schema = TableSchema.ReadFrom(txw.Allocator, schemaPtr, schemaSize);

                    var table = txw.OpenTable(schema, tableName);

                    // delete the table
                    TableValueReader tvr;
                    while (true)
                    {
                        if (table.SeekOnePrimaryKey(Slices.AfterAllKeys, out tvr) == false)
                        {
                            break;
                        }
                        table.Delete(tvr.Id);

                        MaybeNotifyLeaderThatWeAreStillAlive(context, sp);
                    }

                    entries = reader.ReadInt64();
                    for (long i = 0; i < entries; i++)
                    {
                        MaybeNotifyLeaderThatWeAreStillAlive(context, sp);

                        size = reader.ReadInt32();
                        reader.ReadExactly(size);
                        fixed(byte *pBuffer = reader.Buffer)
                        {
                            tvr = new TableValueReader(pBuffer, size);
                            table.Insert(ref tvr);
                        }
                    }
                    break;

                default:
                    throw new ArgumentOutOfRangeException(nameof(type), type.ToString());
                }
            }
        }
Example 21
        public bool Initialize()
        {
            _locker.EnterWriteLock();
            try
            {
                if (_theHeader == null)
                {
                    throw new ObjectDisposedException("Cannot access the header after it was disposed");
                }

                var headers    = stackalloc FileHeader[2];
                var f1         = &headers[0];
                var f2         = &headers[1];
                var hasHeader1 = _env.Options.ReadHeader(HeaderFileNames[0], f1);
                var hasHeader2 = _env.Options.ReadHeader(HeaderFileNames[1], f2);
                if (hasHeader1 == false && hasHeader2 == false)
                {
                    // new
                    FillInEmptyHeader(f1);
                    FillInEmptyHeader(f2);
                    f1->Hash = CalculateFileHeaderHash(f1);
                    f2->Hash = CalculateFileHeaderHash(f2);
                    _env.Options.WriteHeader(HeaderFileNames[0], f1);
                    _env.Options.WriteHeader(HeaderFileNames[1], f2);

                    Memory.Copy((byte *)_theHeader, (byte *)f1, sizeof(FileHeader));
                    return(true); // new
                }

                if (f1->MagicMarker != Constants.MagicMarker && f2->MagicMarker != Constants.MagicMarker)
                {
                    throw new InvalidDataException("None of the header files start with the magic marker, probably not db files or fatal corruption on " + _env.Options.BasePath);
                }

                if (!ValidHash(f1) && !ValidHash(f2))
                {
                    throw new InvalidDataException("None of the header files have a valid hash, possible corruption on " + _env.Options.BasePath);
                }

                // if one of the files is corrupted, but the other isn't, restore to the valid file
                if (f1->MagicMarker != Constants.MagicMarker || !ValidHash(f1))
                {
                    *f1 = *f2;
                }

                if (f2->MagicMarker != Constants.MagicMarker || !ValidHash(f2))
                {
                    *f2 = *f1;
                }

                if (f1->TransactionId < 0)
                {
                    throw new InvalidDataException("The transaction number cannot be negative on " + _env.Options.BasePath);
                }

                if (f1->HeaderRevision > f2->HeaderRevision)
                {
                    Memory.Copy((byte *)_theHeader, (byte *)f1, sizeof(FileHeader));
                }
                else
                {
                    Memory.Copy((byte *)_theHeader, (byte *)f2, sizeof(FileHeader));
                }
                _revision = _theHeader->HeaderRevision;

                if (_theHeader->Version != Constants.CurrentVersion)
                {
                    _locker.ExitWriteLock();
                    try
                    {
                        var updater = new VoronSchemaUpdater(this, _env.Options);

                        updater.Update();
                    }
                    finally
                    {
                        _locker.EnterWriteLock();
                    }

                    if (_theHeader->Version != Constants.CurrentVersion)
                    {
                        throw new SchemaErrorException(
                                  $"The db file is for version {_theHeader->Version}, which is not compatible with the current version {Constants.CurrentVersion} on {_env.Options.BasePath}");
                    }
                }

                if (_theHeader->PageSize != Constants.Storage.PageSize)
                {
                    var message = string.Format("PageSize mismatch, configured to be {0:#,#} but was {1:#,#}, using the actual value in the file {1:#,#}",
                                                Constants.Storage.PageSize, _theHeader->PageSize);
                    _env.Options.InvokeRecoveryError(this, message, null);
                }

                if (IsEmptyHeader(_theHeader))
                {
                    // db was not initialized - new db
                    return(true);
                }

                return(false);
            }
            finally
            {
                _locker.ExitWriteLock();
            }
        }
Example 22
 private unsafe static void BlockCopy(byte *src, byte *dest, int len)
 {
     Memory.Copy(dest, src, len);
 }
Example 23
        private bool TryReadAndValidateHeader(StorageEnvironmentOptions options, out TransactionHeader *current)
        {
            if (_readAt4Kb > _journalPagerNumberOfAllocated4Kb)
            {
                current = null;
                return(false); // end of journal
            }

            const int pageTo4KbRatio     = Constants.Storage.PageSize / (4 * Constants.Size.Kilobyte);
            var       pageNumber         = _readAt4Kb / pageTo4KbRatio;
            var       positionInsidePage = (_readAt4Kb % pageTo4KbRatio) * (4 * Constants.Size.Kilobyte);

            current = (TransactionHeader *)
                      (_journalPager.AcquirePagePointer(this, pageNumber) + positionInsidePage);

            // due to the reuse of journals we can no longer assume the end of the journal is zeroed;
            // it might contain random garbage or old transactions we can ignore, so we have the following scenarios:
            // * TxId <= current Id      ::  we can ignore old transactions of the reused journal and continue
            // * TxId == current Id + 1  ::  valid, but if the hash is invalid the transaction hasn't been committed
            // * TxId >  current Id + 1  ::  if the hash is invalid we can ignore it (reused/random data), but if the hash is valid then we might have missed transactions

            if (current->HeaderMarker != Constants.TransactionHeaderMarker)
            {
                // not a transaction page,

                // if the header marker is zero or garbage, we are probably in the area at the end of the log file, and have no additional log records
                // to read from it. This can happen if the next transaction was too big to fit in the current log file. We stop reading
                // this log file and move to the next one, or it might have happened because of reuse of journal file

                // note : we might encounter a "valid" TransactionHeaderMarker which is still garbage, so we will test that later on

                RequireHeaderUpdate = false;
                return(false);
            }

            if (current->TransactionId < 0)
            {
                return(false);
            }

            current = EnsureTransactionMapped(current, pageNumber, positionInsidePage);
            bool hashIsValid;

            if (options.Encryption.IsEnabled)
            {
                // We use temp buffers to hold the transaction before decrypting, and release the buffers afterwards.
                var pagesSize = current->CompressedSize != -1 ? current->CompressedSize : current->UncompressedSize;
                var size      = (4 * Constants.Size.Kilobyte) * GetNumberOf4KbFor(sizeof(TransactionHeader) + pagesSize);

                var ptr    = PlatformSpecific.NativeMemory.Allocate4KbAlignedMemory(size, out var thread);
                var buffer = new EncryptionBuffer
                {
                    Pointer          = ptr,
                    Size             = size,
                    AllocatingThread = thread
                };

                _encryptionBuffers.Add(buffer);
                Memory.Copy(buffer.Pointer, (byte *)current, size);
                current = (TransactionHeader *)buffer.Pointer;

                try
                {
                    DecryptTransaction((byte *)current, options);
                    hashIsValid = true;
                }
                catch (InvalidOperationException ex)
                {
                    if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                    {
                        options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                                                                        $"Unable to decrypt data of transaction which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). " +
                                                                        "Safely continuing the startup recovery process.",
                                                                        ex);

                        return(true);
                    }

                    RequireHeaderUpdate = true;
                    options.InvokeRecoveryError(this, "Transaction " + current->TransactionId + " was not committed", ex);
                    return(false);
                }
            }
            else
            {
                hashIsValid = ValidatePagesHash(options, current);
            }

            long lastTxId;

            if (LastTransactionHeader != null)
            {
                lastTxId = LastTransactionHeader->TransactionId;
            }
            else
            {
                // this is the first transaction being processed in the recovery process

                if (_journalInfo.LastSyncedTransactionId == -1 || current->TransactionId <= _journalInfo.LastSyncedTransactionId)
                {
                    if (hashIsValid == false && CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                    {
                        options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                                                                        $"Invalid hash of data of first transaction which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). " +
                                                                        "Safely continuing the startup recovery process.", null);

                        return(true);
                    }

                    if (hashIsValid && _firstValidTransactionHeader == null)
                    {
                        _firstValidTransactionHeader = current;
                    }

                    return(hashIsValid);
                }

                lastTxId = _journalInfo.LastSyncedTransactionId;
            }

            var txIdDiff = current->TransactionId - lastTxId;

            // transaction id 1 is the first storage transaction, which does not increment the transaction counter after commit
            if (current->TransactionId != 1)
            {
                if (txIdDiff < 0)
                {
                    if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                    {
                        options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                                                                        $"Encountered integrity error of transaction data which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). Negative tx id diff: {txIdDiff}. " +
                                                                        "Safely continuing the startup recovery process.", null);

                        return(true);
                    }

                    return(false);
                }

                if (txIdDiff > 1 || txIdDiff == 0)
                {
                    if (hashIsValid)
                    {
                        // TxId is bigger than the last one by more than 1 but has a valid hash, which means we lost transactions in the middle

                        if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                        {
                            // when running in "ignore data integrity errors" mode we could have skipped corrupted but already synced data,
                            // so txIdDiff > 1 is expected in this case; let recovery continue

                            options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                                                                            $"Encountered integrity error of transaction data which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). Tx diff is: {txIdDiff}. " +
                                                                            $"Safely continuing the startup recovery process. Debug details - file header {_currentFileHeader}", null);

                            return(true);
                        }

                        if (LastTransactionHeader != null)
                        {
                            throw new InvalidJournalException(
                                      $"Transaction has valid(!) hash with invalid transaction id {current->TransactionId}, the last valid transaction id is {LastTransactionHeader->TransactionId}. Tx diff is: {txIdDiff}." +
                                      $" Journal file {_journalPager.FileName} might be corrupted. Debug details - file header {_currentFileHeader}", _journalInfo);
                        }

                        throw new InvalidJournalException(
                                  $"The last synced transaction id was {_journalInfo.LastSyncedTransactionId} (in journal: {_journalInfo.LastSyncedJournal}) but the first transaction being read in the recovery process is {current->TransactionId} (transaction has valid hash). Tx diff is: {txIdDiff}. " +
                                  $"Some journals are missing. Current journal file {_journalPager.FileName}. Debug details - file header {_currentFileHeader}", _journalInfo);
                    }
                }

                // if (txIdDiff == 1) :
                if (current->LastPageNumber <= 0)
                {
                    if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                    {
                        options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                                                                        $"Invalid last page number ({current->LastPageNumber}) in the header of transaction which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). " +
                                                                        $"Safely continuing the startup recovery process. Debug details - file header {_currentFileHeader}", null);

                        return(true);
                    }

                    throw new InvalidDataException("Last page number after committed transaction must be greater than 0. Debug details - file header {_currentFileHeader}");
                }
            }

            if (hashIsValid == false)
            {
                if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                {
                    options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                                                                    $"Invalid hash of data of transaction which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). " +
                                                                    "Safely continuing the startup recovery process.", null);

                    return(true);
                }

                RequireHeaderUpdate = true;
                return(false);
            }

            if (_firstValidTransactionHeader == null)
            {
                _firstValidTransactionHeader = current;
            }

            return(true);
        }
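
The txIdDiff handling above boils down to a small decision table. The following is a minimal sketch of those rules (hypothetical helper and enum names, not the actual Voron code), leaving out the special cases for transaction id 1 and for already-synced data:

        enum TxRecoveryDecision { Continue, StopAtCorruption, InvalidJournal }

        static TxRecoveryDecision Classify(long txIdDiff, bool hashIsValid)
        {
            if (txIdDiff < 0)
                return TxRecoveryDecision.StopAtCorruption;  // stale transaction left in a reused journal

            if (txIdDiff == 0 || txIdDiff > 1)
                return hashIsValid
                    ? TxRecoveryDecision.InvalidJournal      // valid hash but wrong id: journals are missing or corrupted
                    : TxRecoveryDecision.StopAtCorruption;   // garbage left over from journal reuse

            // txIdDiff == 1: the expected next transaction
            return hashIsValid
                ? TxRecoveryDecision.Continue
                : TxRecoveryDecision.StopAtCorruption;       // uncommitted tail of the journal
        }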
Esempio n. 24
0
        /// <summary>
        /// Executes a commit of data, flushing the data to disk and using the provided header data to properly
        /// complete the commit.
        /// </summary>
        /// <param name="header">The file header block to persist with the committed data.</param>
        public void CommitChanges(FileHeaderBlock header)
        {
            using (var pageLock = new IoSession(this, m_pageReplacementAlgorithm))
            {
                //Determine how much uncommitted data to write
                long lengthOfAllData = (header.LastAllocatedBlock + 1) * (long)m_fileStructureBlockSize;
                long copyLength      = lengthOfAllData - m_lengthOfCommittedData;

                //Write the uncommitted data.
                m_queue.Write(m_lengthOfCommittedData, m_writeBuffer, copyLength, waitForWriteToDisk: true);

                byte[] bytes = header.GetBytes();
                if (header.HeaderBlockCount == 10)
                {
                    //Write the new header to position 0, position 1, and one of positions 2-9
                    m_queue.WriteRaw(0, bytes, m_fileStructureBlockSize);
                    m_queue.WriteRaw(m_fileStructureBlockSize, bytes, m_fileStructureBlockSize);
                    m_queue.WriteRaw(m_fileStructureBlockSize * ((header.SnapshotSequenceNumber & 7) + 2), bytes, m_fileStructureBlockSize);
                }
                else
                {
                    for (int x = 0; x < header.HeaderBlockCount; x++)
                    {
                        m_queue.WriteRaw(x * m_fileStructureBlockSize, bytes, m_fileStructureBlockSize);
                    }
                }

                m_queue.FlushFileBuffers();

                long startPos;

                //Copy recently committed data to the buffer pool
                if ((m_lengthOfCommittedData & (m_diskBlockSize - 1)) != 0) //Only if there is a split page.
                {
                    startPos = m_lengthOfCommittedData & (~(long)(m_diskBlockSize - 1));
                    //Finish filling up the split page in the buffer.
                    IntPtr ptrDest;

                    if (pageLock.TryGetSubPage(startPos, out ptrDest))
                    {
                        int    length;
                        IntPtr ptrSrc;
                        m_writeBuffer.ReadBlock(m_lengthOfCommittedData, out ptrSrc, out length);
                        Footer.WriteChecksumResultsToFooter(ptrSrc, m_fileStructureBlockSize, length);
                        ptrDest += (m_diskBlockSize - length);
                        Memory.Copy(ptrSrc, ptrDest, length);
                    }
                    startPos += m_diskBlockSize;
                }
                else
                {
                    startPos = m_lengthOfCommittedData;
                }

                while (startPos < lengthOfAllData)
                {
                    //If the address doesn't exist in the current list, read it from the disk.
                    int    poolPageIndex;
                    IntPtr poolAddress;
                    m_pool.AllocatePage(out poolPageIndex, out poolAddress);
                    m_writeBuffer.CopyTo(startPos, poolAddress, m_diskBlockSize);
                    Footer.WriteChecksumResultsToFooter(poolAddress, m_fileStructureBlockSize, m_diskBlockSize);

                    if (!m_pageReplacementAlgorithm.TryAddPage(startPos, poolAddress, poolPageIndex))
                    {
                        m_pool.ReleasePage(poolPageIndex);
                    }

                    startPos += m_diskBlockSize;
                }
                m_lengthOfCommittedData = lengthOfAllData;
            }
            ReleaseWriteBufferSpace();
        }
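
A note on the header write pattern above: with ten header blocks, the header always goes to blocks 0 and 1, while the third copy rotates through blocks 2-9 based on the snapshot sequence number, so no single torn write can destroy every valid header. A sketch of the slot selection (hypothetical helper, mirroring the (SnapshotSequenceNumber & 7) + 2 expression above):

        static int[] HeaderBlocksFor(long snapshotSequenceNumber)
        {
            int rotating = (int)(snapshotSequenceNumber & 7) + 2; // cycles through 2, 3, ..., 9
            return new[] { 0, 1, rotating };
        }

For example, snapshot sequence number 11 places the rotating copy in block (11 & 7) + 2 = 5.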
Esempio n. 25
0
        private void HandleUncompressedNodes(DecompressedLeafPage decompressedPage, TreePage p, DecompressionUsage usage)
        {
            int numberOfEntries = p.NumberOfEntries;

            for (var i = 0; i < numberOfEntries; i++)
            {
                var uncompressedNode = p.GetNode(i);

                Slice nodeKey;
                using (TreeNodeHeader.ToSlicePtr(_tx.Allocator, uncompressedNode, out nodeKey))
                {
                    if (uncompressedNode->Flags == TreeNodeFlags.CompressionTombstone)
                    {
                        HandleTombstone(decompressedPage, nodeKey, usage);
                        continue;
                    }

                    if (decompressedPage.HasSpaceFor(_llt, TreeSizeOf.NodeEntry(uncompressedNode)) == false)
                    {
                        throw new InvalidOperationException("Could not add uncompressed node to decompressed page");
                    }

                    int index;

                    if (decompressedPage.NumberOfEntries > 0)
                    {
                        Slice lastKey;
                        using (decompressedPage.GetNodeKey(_llt, decompressedPage.NumberOfEntries - 1, out lastKey))
                        {
                            // optimization: it's very likely that uncompressed nodes have greater keys than compressed ones
                            // when we insert sequential keys

                            var cmp = SliceComparer.CompareInline(nodeKey, lastKey);

                            if (cmp > 0)
                            {
                                index = decompressedPage.NumberOfEntries;
                            }
                            else
                            {
                                if (cmp == 0)
                                {
                                    // update of the last entry, just decrement NumberOfEntries in the page and
                                    // put it at the last position

                                    index = decompressedPage.NumberOfEntries - 1;
                                    decompressedPage.Lower -= Constants.Tree.NodeOffsetSize;
                                }
                                else
                                {
                                    index = decompressedPage.NodePositionFor(_llt, nodeKey);

                                    if (decompressedPage.LastMatch == 0) // update
                                    {
                                        decompressedPage.RemoveNode(index);

                                        if (usage == DecompressionUsage.Write)
                                        {
                                            State.NumberOfEntries--;
                                        }
                                    }
                                }
                            }
                        }
                    }
                    else
                    {
                        // all uncompressed nodes were compression tombstones, which deleted all entries from the decompressed page
                        index = 0;
                    }

                    switch (uncompressedNode->Flags)
                    {
                    case TreeNodeFlags.PageRef:
                        decompressedPage.AddPageRefNode(index, nodeKey, uncompressedNode->PageNumber);
                        break;

                    case TreeNodeFlags.Data:
                        var pos       = decompressedPage.AddDataNode(index, nodeKey, uncompressedNode->DataSize);
                        var nodeValue = TreeNodeHeader.Reader(_llt, uncompressedNode);
                        Memory.Copy(pos, nodeValue.Base, nodeValue.Length);
                        break;

                    case TreeNodeFlags.MultiValuePageRef:
                        throw new NotSupportedException("Multi trees do not support compression");

                    default:
                        throw new NotSupportedException("Invalid node type to copye: " + uncompressedNode->Flags);
                    }
                }
            }
        }
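
The insert-position logic above compares the new key against the page's current last key first, so sequential inserts append directly and skip the search. A minimal sketch of the same pattern over a plain sorted List<string> (hypothetical, assuming the usual System and System.Collections.Generic imports):

        static int FindInsertPosition(List<string> sortedKeys, string key, out bool isUpdate)
        {
            isUpdate = false;

            if (sortedKeys.Count > 0)
            {
                int cmp = string.CompareOrdinal(key, sortedKeys[sortedKeys.Count - 1]);

                if (cmp > 0)
                    return sortedKeys.Count;      // common case for sequential keys: append at the end

                if (cmp == 0)
                {
                    isUpdate = true;              // overwrite the last entry in place
                    return sortedKeys.Count - 1;
                }
            }

            int index = sortedKeys.BinarySearch(key, StringComparer.Ordinal);
            if (index >= 0)
            {
                isUpdate = true;                  // exact match: update the existing entry
                return index;
            }
            return ~index;                        // fall back to the binary-search insertion point
        }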
Esempio n. 26
0
        private unsafe bool ReadSnapshot(SnapshotReader reader, ClusterOperationContext context, Transaction txw, bool dryRun, CancellationToken token)
        {
            var type = reader.ReadInt32();

            if (type == -1)
            {
                return(false);
            }

            while (true)
            {
                token.ThrowIfCancellationRequested();

                int  size;
                long entries;
                switch ((RootObjectType)type)
                {
                case RootObjectType.None:
                    return(true);

                case RootObjectType.VariableSizeTree:
                    size = reader.ReadInt32();
                    reader.ReadExactly(size);

                    Tree tree = null;
                    if (dryRun == false)
                    {
                        Slice.From(context.Allocator, reader.Buffer, 0, size, ByteStringType.Immutable, out Slice treeName);     // The Slice will be freed on context close
                        txw.DeleteTree(treeName);
                        tree = txw.CreateTree(treeName);
                    }

                    entries = reader.ReadInt64();
                    for (long i = 0; i < entries; i++)
                    {
                        token.ThrowIfCancellationRequested();
                        size = reader.ReadInt32();
                        reader.ReadExactly(size);
                        using (Slice.From(context.Allocator, reader.Buffer, 0, size, ByteStringType.Immutable, out Slice valKey))
                        {
                            size = reader.ReadInt32();
                            reader.ReadExactly(size);

                            if (dryRun == false)
                            {
                                using (tree.DirectAdd(valKey, size, out byte *ptr))
                                {
                                    fixed(byte *pBuffer = reader.Buffer)
                                    {
                                        Memory.Copy(ptr, pBuffer, size);
                                    }
                                }
                            }
                        }
                    }
                    break;

                case RootObjectType.Table:

                    size = reader.ReadInt32();
                    reader.ReadExactly(size);

                    TableValueReader tvr;
                    Table            table = null;
                    if (dryRun == false)
                    {
                        Slice.From(context.Allocator, reader.Buffer, 0, size, ByteStringType.Immutable,
                                   out Slice tableName);//The Slice will be freed on context close
                        var tableTree = txw.ReadTree(tableName, RootObjectType.Table);

                        // Get the table schema
                        var schemaSize = tableTree.GetDataSize(TableSchema.SchemasSlice);
                        var schemaPtr  = tableTree.DirectRead(TableSchema.SchemasSlice);
                        if (schemaPtr == null)
                        {
                            throw new InvalidOperationException(
                                      "When trying to install snapshot, found missing table " + tableName);
                        }

                        var schema = TableSchema.ReadFrom(txw.Allocator, schemaPtr, schemaSize);

                        table = txw.OpenTable(schema, tableName);

                        // delete the table
                        while (true)
                        {
                            token.ThrowIfCancellationRequested();
                            if (table.SeekOnePrimaryKey(Slices.AfterAllKeys, out tvr) == false)
                            {
                                break;
                            }
                            table.Delete(tvr.Id);
                        }
                    }

                    entries = reader.ReadInt64();
                    for (long i = 0; i < entries; i++)
                    {
                        token.ThrowIfCancellationRequested();
                        size = reader.ReadInt32();
                        reader.ReadExactly(size);

                        if (dryRun == false)
                        {
                            fixed(byte *pBuffer = reader.Buffer)
                            {
                                tvr = new TableValueReader(pBuffer, size);
                                table.Insert(ref tvr);
                            }
                        }
                    }
                    break;

                default:
                    throw new ArgumentOutOfRangeException(nameof(type), type.ToString());
                }

                type = reader.ReadInt32();
            }
        }
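
The ReadInt32/ReadInt64/ReadExactly calls above imply a simple length-prefixed layout for each snapshot record. A hedged sketch of the matching writer for a variable-size tree (hypothetical; the actual RavenDB writer is not shown on this page, and this assumes the RootObjectType enum used above is in scope):

        static void WriteTreeSnapshot(BinaryWriter writer, string treeName,
                                      IEnumerable<KeyValuePair<byte[], byte[]>> entries, long entryCount)
        {
            byte[] name = Encoding.UTF8.GetBytes(treeName);

            writer.Write((int)RootObjectType.VariableSizeTree); // record type
            writer.Write(name.Length);                          // tree name, length-prefixed
            writer.Write(name);
            writer.Write(entryCount);                           // number of entries that follow

            foreach (var kvp in entries)
            {
                writer.Write(kvp.Key.Length);                   // key, length-prefixed
                writer.Write(kvp.Key);
                writer.Write(kvp.Value.Length);                 // value, length-prefixed
                writer.Write(kvp.Value);
            }

            // after all records, the stream is terminated by writing (int)RootObjectType.None
        }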
Esempio n. 27
0
 public void Execute(Memory memory)
 {
     var newConfig = memory.Copy();
     ExecuteOnConfiguration(newConfig);
 }
Esempio n. 28
0
        private void MoveLeafNode(Page parentPage, Page from, Page to)
        {
            Debug.Assert(from.IsBranch == false);
            var originalFromKeyStart = GetActualKey(from, from.LastSearchPositionOrLastEntry);

            var   fromNode = from.GetNode(from.LastSearchPosition);
            byte *val      = from.Base + from.KeysOffsets[from.LastSearchPosition] + Constants.NodeHeaderSize + originalFromKeyStart.Size;

            var nodeVersion = fromNode->Version;             // every time a new node is allocated its version is increased, but in this case we do not want to increase it

            if (nodeVersion > 0)
            {
                nodeVersion -= 1;
            }

            var prefixedOriginalFromKey = to.PrepareKeyToInsert(originalFromKeyStart, to.LastSearchPosition);

            byte *dataPos;
            var   fromDataSize = fromNode->DataSize;

            switch (fromNode->Flags)
            {
            case NodeFlags.PageRef:
                to.EnsureHasSpaceFor(_tx, prefixedOriginalFromKey, -1);
                dataPos = to.AddPageRefNode(to.LastSearchPosition, prefixedOriginalFromKey, fromNode->PageNumber);
                break;

            case NodeFlags.Data:
                to.EnsureHasSpaceFor(_tx, prefixedOriginalFromKey, fromDataSize);
                dataPos = to.AddDataNode(to.LastSearchPosition, prefixedOriginalFromKey, fromDataSize, nodeVersion);
                break;

            case NodeFlags.MultiValuePageRef:
                to.EnsureHasSpaceFor(_tx, prefixedOriginalFromKey, fromDataSize);
                dataPos = to.AddMultiValueNode(to.LastSearchPosition, prefixedOriginalFromKey, fromDataSize, nodeVersion);
                break;

            default:
                throw new NotSupportedException("Invalid node type to move: " + fromNode->Flags);
            }

            if (dataPos != null && fromDataSize > 0)
            {
                Memory.Copy(dataPos, val, fromDataSize);
            }

            from.RemoveNode(from.LastSearchPositionOrLastEntry);

            var pos = parentPage.LastSearchPositionOrLastEntry;

            parentPage.RemoveNode(pos);

            var newSeparatorKey = GetActualKey(to, 0); // get the smallest key it has now
            var pageNumber      = to.PageNumber;

            if (parentPage.GetNode(0)->PageNumber == to.PageNumber)
            {
                pageNumber      = from.PageNumber;
                newSeparatorKey = GetActualKey(from, 0);
            }

            AddSeparatorToParentPage(parentPage, pageNumber, newSeparatorKey, pos);
        }
Esempio n. 29
0
        public void Setup()
        {
            var generator = new Random(RandomSeed);

            _allocator = new ByteStringContext(SharedMultipleUseFlag.None);
            _buffers   = new List <Tuple <ByteString, int> >();

            // Generate the precomputed sequences to be used when generating data.
            var sequences = new List <byte[]>();

            for (int i = 0; i < NumberOfSequences; i++)
            {
                int length   = generator.Next(1, GeneratedSequenceMaximumLength);
                var sequence = new byte[length];
                generator.NextBytes(sequence);
                sequences.Add(sequence);
            }

            // Compute the length of the maximum output data. This is an upper bound
            // to be able to always use the same buffer for decompression.
            int maximumOutputLength = (int)Sparrow.Compression.LZ4.MaximumOutputLength(DataMaximumLength);

            _allocator.Allocate(maximumOutputLength, out _lz4Buffer);

            var buffer = new byte[DataMaximumLength];

            for (int i = 0; i < NumberOfOperations; i++)
            {
                var        generatedDataLength = generator.Next(DataMaximumLength);
                List <int> usedSequences       = new List <int>();
                for (var j = 0; j < generatedDataLength; j++)
                {
                    bool useSequence = generator.NextDouble() < SequenceUsageProbability;
                    if (sequences.Count > 0 && useSequence)
                    {
                        byte[] sequence;
                        bool   repeatSequence = generator.NextDouble() < SequenceRepetitionProbability;
                        if (repeatSequence && usedSequences.Count > 0)
                        {
                            int index = generator.Next(usedSequences.Count);
                            sequence = sequences[usedSequences[index]];
                        }
                        else
                        {
                            int index = generator.Next(sequences.Count);
                            sequence = sequences[index];
                            usedSequences.Add(index);
                        }

                        fixed(byte *bufferPtr = &buffer[j])
                        fixed(byte *sequencePtr = sequence)
                        {
                            int amount = Math.Min(sequence.Length, generatedDataLength - j);

                            Memory.Copy(bufferPtr, sequencePtr, amount);
                            j += amount;
                        }
                    }
                    else
                    {
                        var spontaneousSequenceLength = Math.Min(generator.Next(GeneratedSequenceMaximumLength), generatedDataLength - j);
                        for (int k = 0; k < spontaneousSequenceLength; k++, j++)
                        {
                            buffer[j] = (byte)generator.Next(256);
                        }
                    }
                }

                // Flip bytes on the generated sequence, as required
                bool flipGeneratedSequence = generator.NextDouble() < DataFlipProbability;
                if (flipGeneratedSequence)
                {
                    for (var j = 0; j < generatedDataLength; j++)
                    {
                        bool flipGeneratedByte = generator.NextDouble() < DataByteFlipProbability;
                        if (flipGeneratedByte)
                        {
                            buffer[j] ^= (byte)generator.Next(256);
                        }
                    }
                }

                // Calculate compression size and store the generated data
                fixed(byte *bufferPtr = buffer)
                {
                    int compressedSize = Sparrow.Compression.LZ4.Encode64(bufferPtr, _lz4Buffer.Ptr, generatedDataLength, _lz4Buffer.Length);

                    ByteString unmanagedBuffer;

                    _allocator.From(_lz4Buffer.Ptr, compressedSize, ByteStringType.Immutable, out unmanagedBuffer);
                    _buffers.Add(new Tuple <ByteString, int>(unmanagedBuffer, generatedDataLength));
                }
            }
        }
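
A plausible benchmark body for the setup above (hypothetical; the actual benchmark method is not shown on this page). It assumes ByteString exposes Ptr and Length as used in Setup, and it reuses the shared _lz4Buffer, whose size is an upper bound for every stored buffer:

        public unsafe void DecompressAllBuffers()
        {
            foreach (var tuple in _buffers)
            {
                var compressed       = tuple.Item1; // LZ4-compressed bytes
                int uncompressedSize = tuple.Item2; // original generated data length

                Sparrow.Compression.LZ4.Decode64LongBuffers(
                    compressed.Ptr, compressed.Length, _lz4Buffer.Ptr, uncompressedSize, true);
            }
        }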
Esempio n. 30
0
        public bool ReadOneTransactionToDataFile(StorageEnvironmentOptions options)
        {
            if (_readAt4Kb >= _journalPagerNumberOfAllocated4Kb)
            {
                return(false);
            }

            if (TryReadAndValidateHeader(options, out TransactionHeader * current) == false)
            {
                var lastValid4Kb = _readAt4Kb;
                _readAt4Kb++;

                while (_readAt4Kb < _journalPagerNumberOfAllocated4Kb)
                {
                    if (TryReadAndValidateHeader(options, out current))
                    {
                        if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                        {
                            SkipCurrentTransaction(current);
                            return(true);
                        }

                        RequireHeaderUpdate = true;
                        break;
                    }
                    _readAt4Kb++;
                }

                _readAt4Kb = lastValid4Kb;
                return(false);
            }

            if (IsAlreadySyncTransaction(current))
            {
                SkipCurrentTransaction(current);
                return(true);
            }

            var performDecompression = current->CompressedSize != -1;

            var transactionSizeIn4Kb = GetTransactionSizeIn4Kb(current);

            _readAt4Kb += transactionSizeIn4Kb;

            TransactionHeaderPageInfo *pageInfoPtr;
            byte *outputPage;

            if (performDecompression)
            {
                var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize);
                _recoveryPager.EnsureContinuous(0, numberOfPages);
                _recoveryPager.EnsureMapped(this, 0, numberOfPages);
                outputPage = _recoveryPager.AcquirePagePointer(this, 0);
                Memory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize);

                try
                {
                    LZ4.Decode64LongBuffers((byte *)current + sizeof(TransactionHeader), current->CompressedSize, outputPage,
                                            current->UncompressedSize, true);
                }
                catch (Exception e)
                {
                    options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
                    RequireHeaderUpdate = true;

                    return(false);
                }
                pageInfoPtr = (TransactionHeaderPageInfo *)outputPage;
            }
            else
            {
                var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize);
                _recoveryPager.EnsureContinuous(0, numberOfPages);
                _recoveryPager.EnsureMapped(this, 0, numberOfPages);
                outputPage = _recoveryPager.AcquirePagePointer(this, 0);
                Memory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize);
                Memory.Copy(outputPage, (byte *)current + sizeof(TransactionHeader), current->UncompressedSize);
                pageInfoPtr = (TransactionHeaderPageInfo *)outputPage;
            }

            long totalRead = sizeof(TransactionHeaderPageInfo) * current->PageCount;

            if (totalRead > current->UncompressedSize)
            {
                throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
            }

            for (var i = 0; i < current->PageCount; i++)
            {
                if (pageInfoPtr[i].PageNumber > current->LastPageNumber)
                {
                    throw new InvalidDataException($"Transaction {current->TransactionId} contains reference to page {pageInfoPtr[i].PageNumber} which is after the last allocated page {current->LastPageNumber}");
                }
            }

            for (var i = 0; i < current->PageCount; i++)
            {
                if (totalRead > current->UncompressedSize)
                {
                    throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
                }

                Debug.Assert(_journalPager.Disposed == false);
                if (performDecompression)
                {
                    Debug.Assert(_recoveryPager.Disposed == false);
                }

                var numberOfPagesOnDestination = GetNumberOfPagesFor(pageInfoPtr[i].Size);
                _dataPager.EnsureContinuous(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
                _dataPager.EnsureMapped(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);


                // We are going to overwrite the page, so we don't care about its current content
                var pagePtr = _dataPager.AcquirePagePointerForNewPage(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
                _dataPager.MaybePrefetchMemory(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);

                var pageNumber = *(long *)(outputPage + totalRead);
                if (pageInfoPtr[i].PageNumber != pageNumber)
                {
                    throw new InvalidDataException($"Expected a diff for page {pageInfoPtr[i].PageNumber} but got one for {pageNumber}");
                }
                totalRead += sizeof(long);

                _modifiedPages.Add(pageNumber);

                for (var j = 1; j < numberOfPagesOnDestination; j++)
                {
                    _modifiedPages.Remove(pageNumber + j);
                }

                _dataPager.UnprotectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);

                if (pageInfoPtr[i].DiffSize == 0)
                {
                    if (pageInfoPtr[i].Size == 0)
                    {
                        // diff contained no changes
                        continue;
                    }

                    var journalPagePtr = outputPage + totalRead;

                    if (options.Encryption.IsEnabled == false)
                    {
                        var pageHeader = (PageHeader *)journalPagePtr;

                        var checksum = StorageEnvironment.CalculatePageChecksum((byte *)pageHeader, pageNumber, out var expectedChecksum);
                        if (checksum != expectedChecksum)
                        {
                            ThrowInvalidChecksumOnPageFromJournal(pageNumber, current, expectedChecksum, checksum, pageHeader);
                        }
                    }

                    Memory.Copy(pagePtr, journalPagePtr, pageInfoPtr[i].Size);
                    totalRead += pageInfoPtr[i].Size;

                    if (options.Encryption.IsEnabled)
                    {
                        var pageHeader = (PageHeader *)pagePtr;

                        if ((pageHeader->Flags & PageFlags.Overflow) == PageFlags.Overflow)
                        {
                            // need to mark overlapped buffers as invalid for commit

                            var encryptionBuffers = ((IPagerLevelTransactionState)this).CryptoPagerTransactionState[_dataPager];

                            var numberOfPages = VirtualPagerLegacyExtensions.GetNumberOfOverflowPages(pageHeader->OverflowSize);

                            for (var j = 1; j < numberOfPages; j++)
                            {
                                if (encryptionBuffers.TryGetValue(pageNumber + j, out var buffer))
                                {
                                    buffer.SkipOnTxCommit = true;
                                }
                            }
                        }
                    }
                }
                else
                {
                    _diffApplier.Destination = pagePtr;
                    _diffApplier.Diff        = outputPage + totalRead;
                    _diffApplier.Size        = pageInfoPtr[i].Size;
                    _diffApplier.DiffSize    = pageInfoPtr[i].DiffSize;
                    _diffApplier.Apply(pageInfoPtr[i].IsNewDiff);
                    totalRead += pageInfoPtr[i].DiffSize;
                }

                _dataPager.ProtectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);
            }

            LastTransactionHeader = current;

            return(true);
        }
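
GetTransactionSizeIn4Kb and GetNumberOf4KbFor are not shown on this page; a plausible sketch of the rounding they imply (an assumption: plain ceiling division into 4 KB units, matching how _readAt4Kb is advanced above):

        static long GetNumberOf4KbFor(long sizeInBytes)
        {
            const long FourKb = 4 * 1024;
            return (sizeInBytes + FourKb - 1) / FourKb; // round up to whole 4 KB units
        }

For example, a 9,000-byte transaction occupies 3 units, i.e. 12 KB of journal space.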
Esempio n. 31
0
        private unsafe void WriteDocumentToServer(DocumentsOperationContext context, ReplicationBatchItem item)
        {
            using (Slice.From(context.Allocator, item.ChangeVector, out var cv))
                fixed(byte *pTemp = _tempBuffer)
                {
                    var requiredSize = sizeof(byte) +  // type
                                       sizeof(int) +   //  size of change vector
                                       cv.Size +
                                       sizeof(short) + // transaction marker
                                       sizeof(long) +  // Last modified ticks
                                       sizeof(DocumentFlags) +
                                       sizeof(int) +   // size of document ID
                                       item.Id.Size +
                                       sizeof(int);    // size of document

                    if (item.Collection != null)
                    {
                        requiredSize += item.Collection.Size + sizeof(int);
                    }

                    if (requiredSize > _tempBuffer.Length)
                    {
                        ThrowTooManyChangeVectorEntries(item);
                    }
                    int tempBufferPos = 0;

                    pTemp[tempBufferPos++] = (byte)item.Type;

                    *(int *)(pTemp + tempBufferPos) = cv.Size;
                    tempBufferPos += sizeof(int);

                    Memory.Copy(pTemp + tempBufferPos, cv.Content.Ptr, cv.Size);
                    tempBufferPos += cv.Size;

                    *(short *)(pTemp + tempBufferPos) = item.TransactionMarker;
                    tempBufferPos += sizeof(short);

                    *(long *)(pTemp + tempBufferPos) = item.LastModifiedTicks;
                    tempBufferPos += sizeof(long);

                    *(DocumentFlags *)(pTemp + tempBufferPos) = item.Flags;
                    tempBufferPos += sizeof(DocumentFlags);

                    *(int *)(pTemp + tempBufferPos) = item.Id.Size;
                    tempBufferPos += sizeof(int);

                    Memory.Copy(pTemp + tempBufferPos, item.Id.Buffer, item.Id.Size);
                    tempBufferPos += item.Id.Size;

                    if (item.Data != null)
                    {
                        *(int *)(pTemp + tempBufferPos) = item.Data.Size;
                        tempBufferPos += sizeof(int);

                        var docReadPos = 0;
                        while (docReadPos < item.Data.Size)
                        {
                            var sizeToCopy = Math.Min(item.Data.Size - docReadPos, _tempBuffer.Length - tempBufferPos);
                            if (sizeToCopy == 0) // buffer is full, need to flush it
                            {
                                _stream.Write(_tempBuffer, 0, tempBufferPos);
                                tempBufferPos = 0;
                                continue;
                            }
                            Memory.Copy(pTemp + tempBufferPos, item.Data.BasePointer + docReadPos, sizeToCopy);
                            tempBufferPos += sizeToCopy;
                            docReadPos    += sizeToCopy;
                        }
                    }
                    else
                    {
                        int dataSize;
                        if (item.Type == ReplicationBatchItem.ReplicationItemType.DocumentTombstone)
                        {
                            dataSize = -1;
                        }
                        else if ((item.Flags & DocumentFlags.DeleteRevision) == DocumentFlags.DeleteRevision)
                        {
                            dataSize = -2;
                        }
                        else
                        {
                            throw new InvalidDataException("Cannot write document with empty data.");
                        }
                        *(int *)(pTemp + tempBufferPos) = dataSize;
                        tempBufferPos += sizeof(int);

                        if (item.Collection == null) //precaution
                        {
                            throw new InvalidDataException("Cannot write item with empty collection name...");
                        }

                        *(int *)(pTemp + tempBufferPos) = item.Collection.Size;
                        tempBufferPos += sizeof(int);
                        Memory.Copy(pTemp + tempBufferPos, item.Collection.Buffer, item.Collection.Size);
                        tempBufferPos += item.Collection.Size;
                    }

                    _stream.Write(_tempBuffer, 0, tempBufferPos);
                }
        }
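
The writer above produces a fixed field order, so the matching reader is mechanical. A hedged sketch of such a reader (hypothetical helper; the actual RavenDB reader is not shown on this page, and this assumes DocumentFlags is an int-backed enum, which writing it via sizeof(DocumentFlags) suggests):

        static void ReadReplicationItem(BinaryReader reader)
        {
            byte type = reader.ReadByte();                              // item type
            byte[] changeVector = reader.ReadBytes(reader.ReadInt32()); // length-prefixed change vector
            short transactionMarker = reader.ReadInt16();
            long lastModifiedTicks = reader.ReadInt64();
            int flags = reader.ReadInt32();                             // DocumentFlags as a 4-byte value
            byte[] id = reader.ReadBytes(reader.ReadInt32());           // length-prefixed document id

            int dataSize = reader.ReadInt32();
            if (dataSize >= 0)
            {
                byte[] document = reader.ReadBytes(dataSize);           // full document payload
            }
            else
            {
                // -1 marks a tombstone, -2 a delete revision; a collection name follows instead of data
                byte[] collection = reader.ReadBytes(reader.ReadInt32());
            }
        }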