Example #1
        public void Flush()
        {
            _bufferSize = 0;

            using (var tx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
            {
                foreach (var kvp in _buffer)
                {
                    var tree = _storageEnvironment.CreateTree(tx, "channel:" + kvp.Key);
                    // key layout: 8 bytes of big-endian ticks, then an 8-byte
                    // big-endian sequence number to keep duplicate timestamps unique
                    var buffer = new byte[16];
                    var key = new Slice(buffer);
                    var ms = new MemoryStream();
                    var bw = new BinaryWriter(ms);
                    foreach (var item in kvp.Value)
                    {
                        var date = item.Timestamp;
                        EndianBitConverter.Big.CopyBytes(date.Ticks, buffer, 0);
                        EndianBitConverter.Big.CopyBytes(_last++, buffer, 8);
                        ms.SetLength(0); // reuse one stream for every value
                        bw.Write(item.Value);
                        ms.Position = 0;

                        tree.Add(tx, key, ms);
                    }
                }

                // persist the sequence counter so the next session resumes from it
                tx.State.Root.Add(tx, _lastKey, new MemoryStream(BitConverter.GetBytes(_last)));
                tx.Commit();
            }
            _buffer.Clear();
        }
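For context, a minimal sketch of the buffering counterpart that Flush drains. The Add method, its PointToAdd type, the dictionary shape of _buffer, and the 1000-item threshold are all assumptions; only _buffer, _bufferSize, and Flush come from the example above.

        // Hypothetical buffering Add: accumulates points per channel in memory;
        // Flush later writes them all in one ReadWrite transaction.
        public void Add(string channel, double value)
        {
            List<PointToAdd> list; // PointToAdd is a hypothetical { Timestamp, Value } type
            if (_buffer.TryGetValue(channel, out list) == false)
                _buffer[channel] = list = new List<PointToAdd>();

            list.Add(new PointToAdd { Timestamp = DateTime.UtcNow, Value = value });

            if (++_bufferSize >= 1000) // assumed threshold: bound memory between flushes
                Flush();
        }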
Example #2
		public override void Accept(Disk d)
		{
			var ms = new MemoryStream();
			var writer = new StreamWriter(ms);
			_serializer.Serialize(new JsonTextWriter(writer), d);
			writer.Flush(); // StreamWriter buffers; flush before rewinding the stream
			ms.Position = 0;
			// big-endian counter keeps keys in insertion order
			var key = new Slice(EndianBitConverter.Big.GetBytes(counter++));
			_currentBatch.Add(key, ms, "albums");

			foreach (var diskId in d.DiskIds)
			{
				_currentBatch.MultiAdd(diskId, key, "ix_diskids");
			}

			if (d.Artist != null)
				_currentBatch.MultiAdd(d.Artist.ToLower(), key, "ix_artists");
			if (d.Title != null)
				_currentBatch.MultiAdd(d.Title.ToLower(), key, "ix_titles");

			// write every 1000 documents; see the completion sketch below for
			// flushing the final partial batch
			if (counter % 1000 == 0)
			{
				_storageEnvironment.Writer.Write(_currentBatch);
				_currentBatch = new WriteBatch();
			}
		}
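Because Accept only writes on every 1000th document, a trailing partial batch stays in memory. A hedged completion step (the Done name is made up) would be:

		// Hypothetical: flush whatever remains in the current batch once
		// ingestion is finished, since counter % 1000 rarely lands exactly
		// on the final document.
		public void Done()
		{
			_storageEnvironment.Writer.Write(_currentBatch);
			_currentBatch = new WriteBatch();
		}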
Example #3
 public static void SetInline(Slice slice, NodeHeader* node)
 {
     // the key bytes live inline, immediately after the node header
     slice.Pointer = (byte*)node + Constants.NodeHeaderSize;
     slice.Size = node->KeySize;
     slice.KeyLength = node->KeySize;
     slice.Array = null; // pointer-backed slice, no managed array
 }
Example #4
        public virtual void Add(WriteBatch writeBatch, Slice key, byte[] value, ushort? expectedVersion = null)
        {
            var stream = new BufferPoolMemoryStream();
            stream.Write(value, 0, value.Length);
            stream.Position = 0;

            writeBatch.Add(key, stream, TableName, expectedVersion);
        }
Example #5
 public PrefixedSlice()
 {
     Options = SliceOptions.Key;
     Size = 0;
     KeyLength = 0;
     Header = new PrefixedSliceHeader();
     NonPrefixedData = new Slice(SliceOptions.Key);
 }
Example #6
		public virtual void Add(WriteBatch writeBatch, Slice key, RavenJToken value, ushort? expectedVersion = null)
		{
			var stream = new BufferPoolMemoryStream(BufferPool);
			value.WriteTo(stream);
			stream.Position = 0;

			writeBatch.Add(key, stream, TableName, expectedVersion);
		}
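Both Add overloads here (Example #4 and this one) materialize the value into a pooled stream and hand it to the Stream-based Add shown in Example #25. A hedged call site; the table instance, the user object, and the storage.Write entry point (seen in Example #15) are assumptions:

			// Hypothetical usage: store a JSON document under a key with
			// optimistic concurrency via expectedVersion.
			using (var writeBatch = new WriteBatch())
			{
				table.Add(writeBatch, new Slice("users/1"), RavenJObject.FromObject(user), expectedVersion: 3);
				storage.Write(writeBatch); // assumed write entry point, as in Example #15
			}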
Example #7
		public static int Compare(Slice x, PrefixedSlice y, SliceComparer cmp, ushort size)
		{
			fixed (byte* p1 = x.Array)
			fixed (byte* p2 = y.NonPrefixedData.Array)
			{
				// each slice is backed either by a managed array or by a raw pointer
				var xPtr = p1 != null ? p1 : x.Pointer;
				var yPtr = p2 != null ? p2 : y.NonPrefixedData.Pointer;

				// y carries no prefix at all: compare the raw bytes directly
				if (y.Header.PrefixId == PrefixedSlice.NonPrefixedId)
					return Compare(null, 0, null, 0, xPtr, x.KeyLength, yPtr, y.Header.NonPrefixedDataSize, cmp, size);

				// no comparison cache: compare prefix and remainder in a single call
				if (x.PrefixComparisonCache == null)
				{
					if (y.Prefix == null)
						return Compare(null, 0, null, 0, xPtr, x.KeyLength, yPtr, y.Header.NonPrefixedDataSize, cmp, size);
					if (y.Prefix.Value == null)
						return Compare(null, 0, y.Prefix.ValuePtr, y.Header.PrefixUsage, xPtr, x.KeyLength, yPtr, y.Header.NonPrefixedDataSize, cmp, size);

					fixed (byte* prefixVal = y.Prefix.Value)
						return Compare(null, 0, prefixVal, y.Header.PrefixUsage, xPtr, x.KeyLength, yPtr, y.Header.NonPrefixedDataSize, cmp, size);
				}

				// with a cache, compare against the shared prefix once and reuse
				// the result for every other key that uses the same prefix
				var prefixBytesToCompare = Math.Min(y.Header.PrefixUsage, x.KeyLength);

				int r;

				// note: y.Prefix is dereferenced here before the null checks below,
				// so this path assumes a non-null Prefix when the cache is present
				if (x.PrefixComparisonCache.TryGetCachedResult(y.Header.PrefixId, y.Prefix.PageNumber, prefixBytesToCompare, out r) == false)
				{
					if (y.Prefix == null)
						r = Compare(null, 0, null, 0, xPtr, x.KeyLength, null, 0, cmp,
							prefixBytesToCompare);
					else if (y.Prefix.Value == null)
						r = Compare(null, 0, y.Prefix.ValuePtr, y.Header.PrefixUsage, xPtr, x.KeyLength, null, 0, cmp,
							prefixBytesToCompare);
					else
					{
						fixed (byte* prefixVal = y.Prefix.Value)
							r = Compare(null, 0, prefixVal, y.Header.PrefixUsage, xPtr, x.KeyLength, null, 0, cmp,
								prefixBytesToCompare);
					}

					x.PrefixComparisonCache.SetPrefixComparisonResult(y.Header.PrefixId, y.Prefix.PageNumber, prefixBytesToCompare, r);
				}

				if (r != 0)
					return r;

				// prefixes are equal: compare the remaining non-prefixed bytes
				size -= prefixBytesToCompare;

				return Compare(null, 0, null, 0, xPtr + prefixBytesToCompare, (ushort)(x.KeyLength - prefixBytesToCompare), yPtr, y.Header.NonPrefixedDataSize, cmp, size);
			}
		}
Example #8
		public void UpdateSchemaVersion(TableStorage tableStorage, Action<string> output)
		{
			var schemaVersionSlice = new Slice("schema_version");
			using (var tx = tableStorage.Environment.NewTransaction(TransactionFlags.ReadWrite))
			{
				tx.ReadTree(Tables.Details.TableName).Add(schemaVersionSlice, ToSchemaVersion);
				tx.Commit();
			}

			tableStorage.SetDatabaseIdAndSchemaVersion(tableStorage.Id, ToSchemaVersion);
		}
Example #9
		public Slice(Slice other, ushort size)
		{
			if (other.Array != null)
				Array = other.Array;
			else
				Pointer = other.Pointer;

			Options = other.Options;
			Size = size;
			KeyLength = size;
		}
Example #10
		private void Initialize()
		{
			using (var tx = storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
			{
				var serverNamesToIds = storageEnvironment.CreateTree(tx, "serverNames->Ids");
				var serverIdsToNames = storageEnvironment.CreateTree(tx, "Ids->serverNames");
				storageEnvironment.CreateTree(tx, "servers->lastEtag");
				storageEnvironment.CreateTree(tx, "counters");
				storageEnvironment.CreateTree(tx, "countersGroups");
				var etags = storageEnvironment.CreateTree(tx, "etags->counters");
				storageEnvironment.CreateTree(tx, "counters->etags");
				
				var metadata = tx.State.GetTree(tx, "$metadata");
				var id = metadata.Read("id");

				if (id == null) // new counter db
				{
					var serverIdBytes = EndianBitConverter.Big.GetBytes(0); // local is always 0
					var serverIdSlice = new Slice(serverIdBytes);
					serverNamesToIds.Add(CounterStorageUrl, serverIdSlice);
					serverIdsToNames.Add(serverIdSlice, CounterStorageUrl);

					Id = Guid.NewGuid();
					metadata.Add("id", Id.ToByteArray());
					metadata.Add("name", Encoding.UTF8.GetBytes(CounterStorageUrl));

					tx.Commit();
				}
				else // existing counter db
				{
					int used;
					Id = new Guid(id.Reader.ReadBytes(16, out used));
					var nameResult = metadata.Read("name");
					if (nameResult == null)
						throw new InvalidOperationException("Could not read name from the store, something bad happened");
					var storedName = new StreamReader(nameResult.Reader.AsStream()).ReadToEnd();

					if (storedName != CounterStorageUrl)
						throw new InvalidOperationException("The stored name " + storedName + " does not match the given name " + CounterStorageUrl);

					using (var it = etags.Iterate())
					{
						if (it.Seek(Slice.AfterAllKeys))
						{
							LastEtag = it.CurrentKey.CreateReader().ReadBigEndianInt64();
						}
					}
				}

				ReplicationTask.StartReplication();
			}
		}
Example #11
        public DateTimeSeries(string path)
        {
            _lastKey = "last-key";
            _storageEnvironment = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path));
            using (var tx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
            {
                var read = tx.State.Root.Read(tx, _lastKey);

                // resume the sequence counter persisted by Flush, or start a new one
                _last = read != null ? read.Reader.ReadInt64() : 1;

                tx.Commit();
            }
        }
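Putting Examples #1 and #11 together, a hedged end-to-end usage; the path and channel name are made up, and Add is the hypothetical buffering method sketched after Example #1:

            var series = new DateTimeSeries("data"); // resumes _last from "last-key", or starts at 1
            series.Add("temperature", 21.5);         // hypothetical buffering Add
            series.Flush();                          // persists buffered points and the counter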
Example #12
        public PrefixedSlice(byte prefixId, ushort prefixUsage, Slice nonPrefixedValue)
        {
            Header = new PrefixedSliceHeader
            {
                PrefixId = prefixId,
                PrefixUsage = prefixUsage,
                NonPrefixedDataSize = nonPrefixedValue.KeyLength
            };

            NonPrefixedData = nonPrefixedValue;
            Size = (ushort)(Constants.PrefixedSliceHeaderSize + nonPrefixedValue.KeyLength);
            KeyLength = (ushort)(prefixUsage + nonPrefixedValue.KeyLength);
            Options = nonPrefixedValue.Options;
        }
Example #13
        protected override void Init()
        {
            base.Init();

            _positionsTree = Transaction.ReadTree("TermPositions");
            _docsTree = Transaction.ReadTree("Docs");

            _fieldId = Index.GetFieldNumber(Field);
            _prefix = new byte[FullTextIndex.FieldDocumentSize];
            _prefixSlice = new Slice(_prefix);

            _maxKey = new byte[FullTextIndex.FieldDocumentSize];
            _maxKeySlice = new Slice(_maxKey);
        }
Example #14
 protected RavenJObject LoadJson(Table table, Slice key, WriteBatch writeBatch, out ushort version)
 {
     var read = table.Read(Snapshot, key, writeBatch);
     if (read == null)
     {
         version = table.ReadVersion(Snapshot, key, writeBatch) ?? 0;
         return null;
     } 
     
     using (var stream = read.Reader.AsStream())
     {
         version = read.Version;
         return stream.ToJObject();
     }
 }
Example #15
		public void SetupDatabaseIdAndSchemaVersion()
		{
			using (var snapshot = storage.CreateSnapshot())
			{
				var idSlice = new Slice("id");
				var schemaVersionSlice = new Slice("schema_version");

				Guid id;
				string schemaVersion;

				var read = storage.Details.Read(snapshot, idSlice, null);
				if (read == null) // new db
				{
					id = Guid.NewGuid();
					schemaVersion = SchemaVersion;
					using (var writeIdBatch = new WriteBatch())
					{
						storage.Details.Add(writeIdBatch, idSlice, id.ToByteArray());
						storage.Details.Add(writeIdBatch, schemaVersionSlice, schemaVersion);
						storage.Write(writeIdBatch);
					}
				}
				else
				{
					if (read.Reader.Length != 16) // precaution: might prevent an NRE in edge cases
						throw new InvalidDataException("Failed to initialize Voron transactional storage. Possible data corruption. (no db id)");

					using (var stream = read.Reader.AsStream())
					using (var reader = new BinaryReader(stream))
					{
						id = new Guid(reader.ReadBytes((int)stream.Length));
					}

					var schemaRead = storage.Details.Read(snapshot, schemaVersionSlice, null);
					if (schemaRead == null)
						throw new InvalidDataException("Failed to initialize Voron transactional storage. Possible data corruption. (no schema version)");

					schemaVersion = schemaRead.Reader.ToStringValue();
				}

				storage.SetDatabaseIdAndSchemaVersion(id, schemaVersion);
			}
		}
Example #16
		public PrefixedSlice(NodeHeader* node)
		{
			if (node->KeySize > 0)
			{
				var prefixHeaderPtr = (PrefixedSliceHeader*)((byte*)node + Constants.NodeHeaderSize);
				Header = *prefixHeaderPtr;

				NonPrefixedData = new Slice((byte*)prefixHeaderPtr + Constants.PrefixedSliceHeaderSize, Header.NonPrefixedDataSize);

				Size = node->KeySize;
				KeyLength = (ushort) (Header.PrefixUsage + Header.NonPrefixedDataSize);
			}
			else
			{
				Size = 0;
				KeyLength = 0;
			}

			Options = SliceOptions.Key;
		}
Example #17
        public override int Accept(string d)
        {
            var disk = JObject.Parse(d); // parse only to extract index fields; the raw string is stored below

            var ms = new MemoryStream();
            var writer = new StreamWriter(ms);
            writer.Write(d);
            writer.Flush();
            ms.Position = 0;
            var key = new Slice(EndianBitConverter.Big.GetBytes(counter++));
            _currentBatch.Add(key, ms, "albums");
            int count = 1;

            foreach (var diskId in disk.Value<JArray>("DiskIds"))
            {
                count++;
                _currentBatch.MultiAdd(diskId.Value<string>(), key, "ix_diskids");
            }

            var artist = disk.Value<string>("Artist");
            if (artist != null)
            {
                count++;
                _currentBatch.MultiAdd(artist.ToLower(), key, "ix_artists");
            }
            var title = disk.Value<string>("Title");
            if (title != null)
            {
                count++;
                _currentBatch.MultiAdd(title.ToLower(), key, "ix_titles");
            }

            if (counter % 500 == 0)
            {
                _storageEnvironment.Writer.Write(_currentBatch);
                _currentBatch = new WriteBatch();
            }
            return count;
        }
Example #18
        // returns true if it was an update operation
        private bool WriteDocumentMetadata(JsonDocumentMetadata metadata, Slice key, bool shouldIgnoreConcurrencyExceptions = false)
        {
            var metadataStream = CreateStream();

            metadataStream.Write(metadata.Etag);
            metadataStream.Write(metadata.Key);

            if (metadata.LastModified.HasValue)
                metadataStream.Write(metadata.LastModified.Value.ToBinary());
            else
                metadataStream.Write((long)0);

            metadata.Metadata.WriteTo(metadataStream);

            metadataStream.Position = 0;

            ushort? existingVersion;
            var isUpdate = metadataIndex.Contains(Snapshot, key, writeBatch.Value, out existingVersion);
            metadataIndex.Add(writeBatch.Value, key, metadataStream, existingVersion, shouldIgnoreConcurrencyExceptions);

            return isUpdate;
        }
Example #19
 private bool PutDocumentMetadataInternal(string key, Slice normalizedKey, RavenJObject metadata, Etag newEtag, DateTime savedAt)
 {
     return WriteDocumentMetadata(new JsonDocumentMetadata
     {
         Key = key,
         Etag = newEtag,
         Metadata = metadata,
         LastModified = savedAt
     }, normalizedKey);
 }
Example #20
		public bool Contains(SnapshotReader snapshot, Slice key, WriteBatch writeBatch, out ushort? version)
		{
			return snapshot.Contains(TableName, key, out version, writeBatch);
		}
Example #21
		public bool Contains(SnapshotReader snapshot, Slice key, WriteBatch writeBatch)
		{
			ushort? version;
			return Contains(snapshot, key, writeBatch, out version);
		}
Example #22
		public virtual IIterator MultiRead(SnapshotReader snapshot, Slice key)
		{
			return snapshot.MultiRead(TableName, key);
		}
Example #23
		public virtual ReadResult Read(SnapshotReader snapshot, Slice key, WriteBatch writeBatch)
		{
			return snapshot.Read(TableName, key, writeBatch);
		}
Example #24
		public virtual void Increment(WriteBatch writeBatch, Slice key, long delta, ushort? expectedVersion = null)
		{
			writeBatch.Increment(key, delta, TableName, expectedVersion);
		}
Example #25
		public virtual void Add(WriteBatch writeBatch, Slice key, Stream value, ushort? expectedVersion = null, bool shouldIgnoreConcurrencyExceptions = false)
		{
			writeBatch.Add(key, value, TableName, expectedVersion, shouldIgnoreConcurrencyExceptions);
		}
Example #26
		public virtual ushort? ReadVersion(SnapshotReader snapshot, Slice key, WriteBatch writeBatch)
		{
			return snapshot.ReadVersion(TableName, key, writeBatch);
		}
Example #27
		public virtual void Delete(WriteBatch writeBatch, Slice key, ushort? expectedVersion = null)
		{
			writeBatch.Delete(key, TableName, expectedVersion);
		}
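Together, Examples #20 through #27 wrap a single table name so that reads go through a SnapshotReader and writes through a WriteBatch without repeating TableName at call sites. A hedged usage sketch; the table instance, the CreateSnapshot call, and the key are assumptions:

		// Hypothetical usage of the Table wrappers above; "table" is an
		// instance of the class these methods belong to (assumed).
		using (var snapshot = storageEnvironment.CreateSnapshot()) // assumed entry point
		using (var writeBatch = new WriteBatch())
		{
			var key = new Slice("users/1");
			ushort? version;
			if (table.Contains(snapshot, key, writeBatch, out version))
				table.Delete(writeBatch, key, version); // delete only the version we saw
			storageEnvironment.Writer.Write(writeBatch);
		}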
Example #28
        private JsonDocumentMetadata ReadDocumentMetadata(string normalizedKey, Slice sliceKey, out int size)
        {
            try
            {
                var metadataReadResult = metadataIndex.Read(Snapshot, sliceKey, writeBatch.Value);
                size = 0;
                if (metadataReadResult == null)
                    return null;

                using (var stream = metadataReadResult.Reader.AsStream())
                {
                    stream.Position = 0;
                    var etag = stream.ReadEtag();
                    var originalKey = stream.ReadString();
                    var lastModifiedDateTimeBinary = stream.ReadInt64();

                    var existingCachedDocument = documentCacher.GetCachedDocument(normalizedKey, etag);
                    size = (int)stream.Length;
                    var metadata = existingCachedDocument != null ? existingCachedDocument.Metadata : stream.ToJObject();
                    var lastModified = DateTime.FromBinary(lastModifiedDateTimeBinary);

                    return new JsonDocumentMetadata
                    {
                        Key = originalKey,
                        Etag = etag,
                        Metadata = metadata,
                        LastModified = lastModified
                    };
                }
            }
            catch (Exception e)
            {
                throw new InvalidDataException("Failed to de-serialize metadata of document " + normalizedKey, e);
            }
        }
Example #29
        private RavenJObject ReadDocumentData(string normalizedKey, Slice sliceKey, Etag existingEtag, RavenJObject metadata, out int size)
        {
            try
            {
                size = -1;

                var existingCachedDocument = documentCacher.GetCachedDocument(normalizedKey, existingEtag);
                if (existingCachedDocument != null)
                {
                    size = existingCachedDocument.Size;
                    return existingCachedDocument.Document;
                }

                var documentReadResult = tableStorage.Documents.Read(Snapshot, sliceKey, writeBatch.Value);
                if (documentReadResult == null) //non existing document
                    return null;

                using (var stream = documentReadResult.Reader.AsStream())
                {
                    using (var decodedDocumentStream = documentCodecs.Aggregate(stream,
                            (current, codec) => codec.Value.Decode(normalizedKey, metadata, current)))
                    {
                        var streamToUse = decodedDocumentStream;
                        if (stream != decodedDocumentStream)
                            streamToUse = new CountingStream(decodedDocumentStream);

                        // read through streamToUse so the CountingStream actually
                        // records the decoded size; reading decodedDocumentStream
                        // directly would leave its counter at zero
                        var documentData = streamToUse.ToJObject();

                        size = (int)Math.Max(stream.Position, streamToUse.Position);
                        documentCacher.SetCachedDocument(normalizedKey, existingEtag, documentData, metadata, size);

                        return documentData;
                    }
                }
            }
            catch (Exception e)
            { 
                InvalidDataException invalidDataException = null;
                try
                {
                    size = -1;
                    var documentReadResult = tableStorage.Documents.Read(Snapshot, sliceKey, writeBatch.Value);
                    if (documentReadResult == null) //non existing document
                        return null;

                    using (var stream = documentReadResult.Reader.AsStream())
                    {
                        using (var reader = new BinaryReader(stream))
                        {
                            if (reader.ReadUInt32() == DocumentCompression.CompressFileMagic)
                            {
                                invalidDataException = new InvalidDataException(string.Format("Document '{0}' is compressed, but the compression bundle is not enabled.\r\n" +
                                                                                              "You have to enable the compression bundle when dealing with compressed documents.", normalizedKey), e);
                            }
                        }
                    }
                }
                catch (Exception)
                {
                    // we are already in error handling mode, just ignore this
                }
                if (invalidDataException != null)
                    throw invalidDataException;

                throw new InvalidDataException("Failed to de-serialize a document: " + normalizedKey, e);
            }
        }
Example #30
		public virtual void MultiAdd(WriteBatch writeBatch, Slice key, Slice value, ushort? expectedVersion = null)
		{
			writeBatch.MultiAdd(key, value, TableName, expectedVersion);
		}