        // Inserts a page of file content, de-duplicating it by content hash.
        public int InsertPage(byte[] buffer, int size)
        {
            var hashKey = new HashKey(buffer, size);
            var key = ConvertToKey(hashKey);

            var pageByKey = storage.Pages.GetIndex(Tables.Pages.Indices.ByKey);
            var pageData = storage.Pages.GetIndex(Tables.Pages.Indices.Data);

            // A hit in the ByKey index means a page with the same content hash is already stored: reuse it.
            var result = pageByKey.Read(Snapshot, key, writeBatch.Value);
            if (result != null)
            {
                var id = result.Reader.ToStringValue();

                ushort version;
                var page = LoadJson(storage.Pages, id, writeBatch.Value, out version);
                if (page == null)
                    throw new InvalidOperationException(string.Format("Could not find page '{0}'. The data is probably corrupted.", id));

                var usageCount = page.Value<int>("usage_count");
                page["usage_count"] = usageCount + 1;

                storage.Pages.Add(writeBatch.Value, id, page, version);

                return page.Value<int>("id");
            }

            // No page with this content exists yet: create its metadata document, store the
            // raw page bytes and index the page by its hash key.
            var newId = IdGenerator.GetNextIdForTable(storage.Pages);
            var newKey = CreateKey(newId);

            var newPage = new RavenJObject
                   {
                       {"id", newId},
                       {"page_strong_hash", hashKey.Strong},
                       {"page_weak_hash", hashKey.Weak},
                       // Start the reference count at 1 so that DeletePage's "usageCount <= 1"
                       // check frees the page only when its last reference is released.
                       {"usage_count", 1}
                   };

            storage.Pages.Add(writeBatch.Value, newKey, newPage, 0);

            // Store only the first 'size' bytes of the supplied buffer (it may be larger than
            // the page itself), matching the Esent implementation below.
            var pageBytes = new byte[size];
            Buffer.BlockCopy(buffer, 0, pageBytes, 0, size);
            pageData.Add(writeBatch.Value, newKey, pageBytes, 0);
            pageByKey.Add(writeBatch.Value, key, newKey);

            return newId;
        }
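
        // Decrements a page's reference count and removes the page entirely (metadata, raw
        // data and its ByKey index entry) once the last reference is gone.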
        private void DeletePage(int pageId)
        {
            var key = CreateKey(pageId);

            ushort version;
            var page = LoadJson(storage.Pages, key, writeBatch.Value, out version);
            if (page == null)
                throw new InvalidOperationException(string.Format("Could not find page '{0}'. The data is probably corrupted.", key));

            var usageCount = page.Value<int>("usage_count");
            // Last reference: remove the page document, its raw bytes and its ByKey index entry.
            if (usageCount <= 1)
            {
                var pageData = storage.Pages.GetIndex(Tables.Pages.Indices.Data);
                var pagesByKey = storage.Pages.GetIndex(Tables.Pages.Indices.ByKey);

                var strongHash = page.Value<byte[]>("page_strong_hash");
                var weakHash = page.Value<int>("page_weak_hash");

                var hashKey = new HashKey
                              {
                                  Strong = strongHash,
                                  Weak = weakHash
                              };

                storage.Pages.Delete(writeBatch.Value, key, version);
                pageData.Delete(writeBatch.Value, key);
                pagesByKey.Delete(writeBatch.Value, ConvertToKey(hashKey));
            }
            else
            {
                page["usage_count"] = usageCount - 1;
                storage.Pages.Add(writeBatch.Value, key, page, version);
            }
        }
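
		// Esent-based variant of InsertPage, written against the ManagedEsent interop (Api.*).
		// It seeks the secondary "by_keys" index on (weak hash, strong hash) and uses an escrow
		// column to increment usage_count.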
		public int InsertPage(byte[] buffer, int size)
		{
			var key = new HashKey(buffer, size);

			// Search the secondary "by_keys" index, which is keyed on (weak hash, strong hash).
			Api.JetSetCurrentIndex(session, Pages, "by_keys");

			Api.MakeKey(session, Pages, key.Weak, MakeKeyGrbit.NewKey);
			Api.MakeKey(session, Pages, key.Strong, MakeKeyGrbit.None);

			// A page with the same hash pair already exists: atomically bump its usage_count
			// via an escrow update and return its id.
			if (Api.TrySeek(session, Pages, SeekGrbit.SeekEQ))
			{
				Api.EscrowUpdate(session, Pages, tableColumnsCache.PagesColumns["usage_count"], 1);
				return Api.RetrieveColumnAsInt32(session, Pages, tableColumnsCache.PagesColumns["id"]).Value;
			}

			// No existing page: insert a new row, keeping its bookmark so we can navigate back
			// to it and read the generated "id" column once the update has been saved.
			var bookMarkBuffer = new byte[SystemParameters.BookmarkMost];
			var actualSize = 0;
			using (var update = new Update(session, Pages, JET_prep.Insert))
			{
				Api.SetColumn(session, Pages, tableColumnsCache.PagesColumns["page_strong_hash"], key.Strong);
				Api.SetColumn(session, Pages, tableColumnsCache.PagesColumns["page_weak_hash"], key.Weak);
				Api.JetSetColumn(session, Pages, tableColumnsCache.PagesColumns["data"], buffer, size, SetColumnGrbit.None, null);

				try
				{
					update.Save(bookMarkBuffer, bookMarkBuffer.Length, out actualSize);
				}
				catch (EsentKeyDuplicateException)
				{
					// The key already exists, which means the same page is being inserted
					// concurrently by another thread.
					throw new ConcurrencyException("The same file page is being created");
				}
			}

			Api.JetGotoBookmark(session, Pages, bookMarkBuffer, actualSize);

			return Api.RetrieveColumnAsInt32(session, Pages, tableColumnsCache.PagesColumns["id"]).Value;
		}
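
        // Builds the composite lookup key used by the ByKey index from a page's strong and weak
        // hashes (note that this relies on the strong-hash bytes decoding losslessly as UTF-8).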
        protected string ConvertToKey(HashKey hashKey)
        {
            return CreateKey(Encoding.UTF8.GetString(hashKey.Strong), hashKey.Weak);
        }
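
        // ------------------------------------------------------------------
        // For reference only: a minimal sketch of the HashKey shape that the
        // methods above rely on, inferred from usage (Strong is a byte[]
        // strong hash, Weak is an int weak hash, both computed over the first
        // 'size' bytes of the buffer). The hash algorithms shown here
        // (SHA-256 and an Adler-32 style checksum) are assumptions, not
        // necessarily what the real HashKey uses.
        // ------------------------------------------------------------------
        public class HashKey
        {
            public byte[] Strong { get; set; }
            public int Weak { get; set; }

            public HashKey()
            {
            }

            public HashKey(byte[] buffer, int size)
            {
                using (var sha = System.Security.Cryptography.SHA256.Create())
                {
                    // Strong hash over exactly the first 'size' bytes of the page.
                    Strong = sha.ComputeHash(buffer, 0, size);
                }

                // Adler-32 style weak hash over the same range; cheap to compute and good
                // enough to reject most non-matches before comparing strong hashes.
                unchecked
                {
                    int a = 1, b = 0;
                    for (var i = 0; i < size; i++)
                    {
                        a = (a + buffer[i]) % 65521;
                        b = (b + a) % 65521;
                    }
                    Weak = (b << 16) | a;
                }
            }
        }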