public RevisionInternal PutLocalRevision(RevisionInternal revision, string prevRevID)
{
    // Stores or deletes a revision of a local (non-replicated) document in the
    // 'localdocs' table. Local document IDs must carry the "_local/" prefix.
    var localDocID = revision.GetDocId();
    if (!localDocID.StartsWith("_local/"))
    {
        throw new CouchbaseLiteException(Status.BadRequest);
    }

    if (revision.IsDeleted())
    {
        // DELETE: remove the stored row and hand the incoming revision back unchanged.
        DeleteLocalDocument(localDocID, prevRevID);
        return revision;
    }

    // PUT: serialize the body once; both the insert and update paths reuse it.
    var encodedBody = EncodeDocumentJSON(revision);
    string resultRevID;
    if (prevRevID == null)
    {
        // First revision of this local document. ConflictIgnore leaves any
        // already-existing row untouched instead of failing.
        resultRevID = "1-local";
        var rowValues = new ContentValues();
        rowValues.Put("docid", localDocID);
        rowValues.Put("revid", resultRevID);
        rowValues.Put("json", encodedBody);
        try
        {
            database.InsertWithOnConflict("localdocs", null, rowValues, SQLiteStorageEngine.ConflictIgnore);
        }
        catch (SQLException e)
        {
            throw new CouchbaseLiteException(e, Status.InternalServerError);
        }
    }
    else
    {
        // Replacement: local rev IDs are simply "<generation>-local", so bump the
        // generation parsed out of the previous rev ID.
        var generation = RevisionInternal.GenerationFromRevID(prevRevID);
        if (generation == 0)
        {
            // prevRevID did not parse as a valid revision ID.
            throw new CouchbaseLiteException(Status.BadRequest);
        }
        resultRevID = Sharpen.Extensions.ToString(++generation) + "-local";
        var rowValues = new ContentValues();
        rowValues.Put("revid", resultRevID);
        rowValues.Put("json", encodedBody);
        var updateArgs = new string[] { localDocID, prevRevID };
        try
        {
            var rowsUpdated = database.Update("localdocs", rowValues, "docid=? AND revid=?", updateArgs);
            if (rowsUpdated == 0)
            {
                // No row matched docid+revid: the caller's prevRevID is stale.
                throw new CouchbaseLiteException(Status.Conflict);
            }
        }
        catch (SQLException e)
        {
            throw new CouchbaseLiteException(e, Status.InternalServerError);
        }
    }
    return revision.CopyWithDocID(localDocID, resultRevID);
}
/// <summary>
/// Inserts an already-existing revision (e.g. one received from a replicator) together with its
/// revision history, grafting it onto the local revision tree inside a single transaction.
/// Missing ancestor revisions are created as stubs; the previous local leaf is marked non-current.
/// </summary>
/// <param name="rev">The revision to insert; its doc ID and rev ID must be set.</param>
/// <param name="revHistory">Rev IDs newest-first, starting with rev's own ID. May be null/empty,
/// in which case a one-entry history of rev's own ID is used.</param>
/// <param name="source">Origin of the change, forwarded to NotifyChange (may be null).</param>
public void ForceInsert(RevisionInternal rev, IList<string> revHistory, Uri source)
{
    RevisionInternal winningRev = null;
    bool inConflict = false;
    string docId = rev.GetDocId();
    string revId = rev.GetRevId();
    if (!IsValidDocumentId(docId) || (revId == null))
    {
        throw new CouchbaseLiteException(Status.BadRequest);
    }
    int historyCount = 0;
    if (revHistory != null)
    {
        historyCount = revHistory.Count;
    }
    if (historyCount == 0)
    {
        // No history supplied: treat the revision as its own one-entry history.
        revHistory = new AList<string>();
        revHistory.AddItem(revId);
        historyCount = 1;
    }
    else
    {
        // The first (newest) history entry must be the revision being inserted.
        if (!revHistory[0].Equals(rev.GetRevId()))
        {
            throw new CouchbaseLiteException(Status.BadRequest);
        }
    }
    bool success = false;
    BeginTransaction();
    try
    {
        // First look up all locally-known revisions of this document:
        long docNumericID = GetOrInsertDocNumericID(docId);
        RevisionList localRevs = GetAllRevisionsOfDocumentID(docId, docNumericID, false);
        if (localRevs == null)
        {
            throw new CouchbaseLiteException(Status.InternalServerError);
        }
        // WinningRevIDOfDoc reports deletion/conflict state via these out-lists:
        // a non-empty list means "true".
        IList<bool> outIsDeleted = new AList<bool>();
        IList<bool> outIsConflict = new AList<bool>();
        bool oldWinnerWasDeletion = false;
        string oldWinningRevID = WinningRevIDOfDoc(docNumericID, outIsDeleted, outIsConflict);
        if (outIsDeleted.Count > 0)
        {
            oldWinnerWasDeletion = true;
        }
        if (outIsConflict.Count > 0)
        {
            inConflict = true;
        }
        // Walk through the remote history in chronological order, matching each revision ID to
        // a local revision. When the list diverges, start creating blank local revisions to fill
        // in the local history:
        long sequence = 0;
        long localParentSequence = 0;
        string localParentRevID = null;  // NOTE: assigned below but never read in this method.
        for (int i = revHistory.Count - 1; i >= 0; --i)
        {
            revId = revHistory[i];
            RevisionInternal localRev = localRevs.RevWithDocIdAndRevId(docId, revId);
            if (localRev != null)
            {
                // This revision is known locally. Remember its sequence as the parent of the next one:
                sequence = localRev.GetSequence();
                System.Diagnostics.Debug.Assert((sequence > 0));
                localParentSequence = sequence;
                localParentRevID = revId;
            }
            else
            {
                // This revision isn't known, so add it:
                RevisionInternal newRev;
                byte[] data = null;
                bool current = false;
                if (i == 0)
                {
                    // Hey, this is the leaf revision we're inserting:
                    newRev = rev;
                    if (!rev.IsDeleted())
                    {
                        data = EncodeDocumentJSON(rev);
                        if (data == null)
                        {
                            throw new CouchbaseLiteException(Status.BadRequest);
                        }
                    }
                    current = true;
                }
                else
                {
                    // It's an intermediate parent, so insert a stub:
                    newRev = new RevisionInternal(docId, revId, false, this);
                }
                // Insert it; 'sequence' still holds the parent's sequence at this point.
                sequence = InsertRevision(newRev, docNumericID, sequence, current, data);
                if (sequence <= 0)
                {
                    throw new CouchbaseLiteException(Status.InternalServerError);
                }
                if (i == 0)
                {
                    // Write any changed attachments for the new revision. As the parent sequence use
                    // the latest local revision (this is to copy attachments from):
                    IDictionary<string, AttachmentInternal> attachments = GetAttachmentsFromRevision(rev);
                    if (attachments != null)
                    {
                        ProcessAttachmentsForRevision(attachments, rev, localParentSequence);
                        StubOutAttachmentsInRevision(attachments, rev);
                    }
                }
            }
        }
        // Mark the latest local rev as no longer current:
        if (localParentSequence > 0 && localParentSequence != sequence)
        {
            ContentValues args = new ContentValues();
            args.Put("current", 0);
            string[] whereArgs = new string[] { System.Convert.ToString(localParentSequence) };
            int numRowsChanged = 0;
            try
            {
                numRowsChanged = database.Update("revs", args, "sequence=? AND current!=0", whereArgs);
                if (numRowsChanged == 0)
                {
                    // Local parent wasn't a leaf, ergo we just created a branch (a conflict).
                    inConflict = true;
                }
            }
            catch (SQLException)
            {
                throw new CouchbaseLiteException(Status.InternalServerError);
            }
        }
        winningRev = Winner(docNumericID, oldWinningRevID, oldWinnerWasDeletion, rev);
        success = true;
        // Notify and return:
        NotifyChange(rev, winningRev, source, inConflict);
    }
    catch (SQLException)
    {
        throw new CouchbaseLiteException(Status.InternalServerError);
    }
    finally
    {
        // Commits on success, rolls back otherwise.
        EndTransaction(success);
    }
}
/// <summary>
/// Persists the last-replicated sequence for a replication checkpoint, upserting
/// (ConflictReplace) the matching row in the 'replicators' table.
/// </summary>
/// <param name="lastSequence">The sequence value to record.</param>
/// <param name="checkpointId">The remote checkpoint identifier.</param>
/// <param name="push">True for a push replication, false for pull.</param>
/// <returns>True if the row was written, false on failure.</returns>
public bool SetLastSequence(string lastSequence, string checkpointId, bool push)
{
    ContentValues values = new ContentValues();
    values.Put("remote", checkpointId);
    values.Put("push", push);
    values.Put("last_sequence", lastSequence);
    long newId = database.InsertWithOnConflict("replicators", null, values, SQLiteStorageEngine.ConflictReplace);
    // BUG FIX: InsertWithOnConflict returns -1 on error (Android SQLiteDatabase
    // contract mirrored here), so the previous `return (newId == -1);` reported
    // success and failure inverted. Return true only when a row id was produced.
    return (newId != -1);
}
/// <summary>
/// Writes one row into the 'revs' table for this revision and records the resulting
/// row id as the revision's sequence. Returns 0 if the insert threw (the error is logged).
/// </summary>
public long InsertRevision(RevisionInternal rev, long docNumericID, long parentSequence, bool current, byte[] data)
{
    var newSequence = 0L;
    try
    {
        var row = new ContentValues();
        row.Put("doc_id", docNumericID);
        row.Put("revid", rev.GetRevId());
        if (parentSequence != 0)
        {
            // A parentSequence of 0 means "no parent"; leave the column unset in that case.
            row.Put("parent", parentSequence);
        }
        row.Put("current", current);
        row.Put("deleted", rev.IsDeleted());
        row.Put("json", data);
        newSequence = database.Insert("revs", null, row);
        rev.SetSequence(newSequence);
    }
    catch (Exception e)
    {
        // Best-effort: log and fall through so the caller sees the 0 sentinel.
        Log.E(Database.Tag, "Error inserting revision", e);
    }
    return newSequence;
}
/// <summary>
/// Creates a new revision of a document (insert, update, or deletion marker) inside a
/// transaction: validates the request, retires the replaced revision, inserts the new row,
/// stores attachments, recomputes the winning revision, and fires a change notification.
/// </summary>
/// <param name="oldRev">The revision content to store (its rev ID is ignored and regenerated).</param>
/// <param name="prevRevId">The rev ID being replaced, or null if this is an insert.</param>
/// <param name="allowConflict">If false, replacing a non-current revision is rejected with Conflict.</param>
/// <param name="resultStatus">Out-status; set to Created, or Ok for deletions.</param>
/// <returns>The stored revision with its new rev ID, or null on failure.</returns>
public RevisionInternal PutRevision(RevisionInternal oldRev, string prevRevId, bool allowConflict, Status resultStatus)
{
    // prevRevId is the rev ID being replaced, or nil if an insert
    // BUG FIX: oldRev was dereferenced (GetDocId/IsDeleted) before the null check,
    // so a null oldRev produced a NullReferenceException instead of BadRequest.
    if (oldRev == null)
    {
        throw new CouchbaseLiteException(Status.BadRequest);
    }
    string docId = oldRev.GetDocId();
    bool deleted = oldRev.IsDeleted();
    if (((prevRevId != null) && (docId == null)) || (deleted && (docId == null)) || ((docId != null) && !IsValidDocumentId(docId)))
    {
        throw new CouchbaseLiteException(Status.BadRequest);
    }
    BeginTransaction();
    Cursor cursor = null;
    bool inConflict = false;
    RevisionInternal winningRev = null;
    RevisionInternal newRev = null;
    //// PART I: In which are performed lookups and validations prior to the insert...
    long docNumericID = (docId != null) ? GetDocNumericID(docId) : 0;
    long parentSequence = 0;
    string oldWinningRevID = null;
    try
    {
        bool oldWinnerWasDeletion = false;
        bool wasConflicted = false;
        if (docNumericID > 0)
        {
            // Non-empty out-lists stand in for boolean out-parameters.
            IList<bool> outIsDeleted = new AList<bool>();
            IList<bool> outIsConflict = new AList<bool>();
            try
            {
                oldWinningRevID = WinningRevIDOfDoc(docNumericID, outIsDeleted, outIsConflict);
                if (outIsDeleted.Count > 0)
                {
                    oldWinnerWasDeletion = true;
                }
                if (outIsConflict.Count > 0)
                {
                    wasConflicted = true;
                }
            }
            catch (Exception e)
            {
                Sharpen.Runtime.PrintStackTrace(e);
            }
        }
        if (prevRevId != null)
        {
            // Replacing: make sure given prevRevID is current & find its sequence number:
            if (docNumericID <= 0)
            {
                // BUG FIX: Java-style "%s" placeholders are not substituted by
                // .NET string.Format; use composite-format "{0}" instead.
                string msg = string.Format("No existing revision found with doc id: {0}", docId);
                throw new CouchbaseLiteException(msg, Status.NotFound);
            }
            string[] args = new string[] { System.Convert.ToString(docNumericID), prevRevId };
            string additionalWhereClause = string.Empty;
            if (!allowConflict)
            {
                additionalWhereClause = "AND current=1";
            }
            cursor = database.RawQuery("SELECT sequence FROM revs WHERE doc_id=? AND revid=? " + additionalWhereClause + " LIMIT 1", args);
            if (cursor.MoveToNext())
            {
                parentSequence = cursor.GetLong(0);
            }
            if (parentSequence == 0)
            {
                // Not found: either a 404 or a 409, depending on whether there is any current revision
                if (!allowConflict && ExistsDocumentWithIDAndRev(docId, null))
                {
                    string msg = string.Format("Conflicts not allowed and there is already an existing doc with id: {0}", docId);
                    throw new CouchbaseLiteException(msg, Status.Conflict);
                }
                else
                {
                    string msg = string.Format("No existing revision found with doc id: {0}", docId);
                    throw new CouchbaseLiteException(msg, Status.NotFound);
                }
            }
            if (validations != null && validations.Count > 0)
            {
                // Fetch the previous revision and validate the new one against it:
                RevisionInternal prevRev = new RevisionInternal(docId, prevRevId, false, this);
                ValidateRevision(oldRev, prevRev);
            }
            // Make replaced rev non-current:
            ContentValues updateContent = new ContentValues();
            updateContent.Put("current", 0);
            database.Update("revs", updateContent, "sequence=" + parentSequence, null);
        }
        else
        {
            // Inserting first revision.
            if (deleted && (docId != null))
            {
                // Didn't specify a revision to delete: 404 or a 409, depending
                if (ExistsDocumentWithIDAndRev(docId, null))
                {
                    throw new CouchbaseLiteException(Status.Conflict);
                }
                else
                {
                    throw new CouchbaseLiteException(Status.NotFound);
                }
            }
            // Validate:
            ValidateRevision(oldRev, null);
            if (docId != null)
            {
                // Inserting first revision, with docID given (PUT):
                if (docNumericID <= 0)
                {
                    // Doc doesn't exist at all; create it:
                    docNumericID = InsertDocumentID(docId);
                    if (docNumericID <= 0)
                    {
                        return null;
                    }
                }
                else
                {
                    // Doc ID exists; check whether current winning revision is deleted:
                    if (oldWinnerWasDeletion)
                    {
                        // Resurrecting a deleted doc: the new rev becomes a child of the deletion.
                        prevRevId = oldWinningRevID;
                        parentSequence = GetSequenceOfDocument(docNumericID, prevRevId, false);
                    }
                    else
                    {
                        if (oldWinningRevID != null)
                        {
                            // The current winning revision is not deleted, so this is a conflict
                            throw new CouchbaseLiteException(Status.Conflict);
                        }
                    }
                }
            }
            else
            {
                // Inserting first revision, with no docID given (POST): generate a unique docID:
                docId = Database.GenerateDocumentId();
                docNumericID = InsertDocumentID(docId);
                if (docNumericID <= 0)
                {
                    return null;
                }
            }
        }
        // There may be a conflict if (a) the document was already in conflict, or
        // (b) a conflict is created by adding a non-deletion child of a non-winning rev.
        inConflict = wasConflicted || (!deleted && prevRevId != null && oldWinningRevID != null && !prevRevId.Equals(oldWinningRevID));
        //// PART II: In which insertion occurs...
        // Get the attachments:
        IDictionary<string, AttachmentInternal> attachments = GetAttachmentsFromRevision(oldRev);
        // Bump the revID and update the JSON:
        string newRevId = GenerateNextRevisionID(prevRevId);
        byte[] data = null;
        if (!oldRev.IsDeleted())
        {
            data = EncodeDocumentJSON(oldRev);
            if (data == null)
            {
                // bad or missing json
                throw new CouchbaseLiteException(Status.BadRequest);
            }
        }
        newRev = oldRev.CopyWithDocID(docId, newRevId);
        StubOutAttachmentsInRevision(attachments, newRev);
        // Now insert the rev itself:
        long newSequence = InsertRevision(newRev, docNumericID, parentSequence, true, data);
        if (newSequence == 0)
        {
            return null;
        }
        // Store any attachments:
        if (attachments != null)
        {
            ProcessAttachmentsForRevision(attachments, newRev, parentSequence);
        }
        // Figure out what the new winning rev ID is:
        winningRev = Winner(docNumericID, oldWinningRevID, oldWinnerWasDeletion, newRev);
        // Success!
        if (deleted)
        {
            resultStatus.SetCode(Status.Ok);
        }
        else
        {
            resultStatus.SetCode(Status.Created);
        }
    }
    catch (SQLException e1)
    {
        Log.E(Database.Tag, "Error putting revision", e1);
        return null;
    }
    finally
    {
        if (cursor != null)
        {
            cursor.Close();
        }
        // Commits only if resultStatus was set to a success code above.
        EndTransaction(resultStatus.IsSuccessful());
    }
    //// EPILOGUE: A change notification is sent...
    NotifyChange(newRev, winningRev, null, inConflict);
    return newRev;
}
/// <summary>
/// Inserts a row into 'revs' for this revision (including the no_attachments flag) and
/// records the resulting row id as the revision's sequence. Returns 0 if the insert threw.
/// </summary>
internal Int64 InsertRevision(RevisionInternal rev, long docNumericID, long parentSequence, bool current, bool hasAttachments, IEnumerable<byte> data)
{
    var newSequence = 0L;
    try
    {
        var row = new ContentValues();
        row["doc_id"] = docNumericID;
        row["revid"] = rev.GetRevId();
        if (parentSequence != 0)
        {
            // 0 means "no parent"; leave the column unset in that case.
            row["parent"] = parentSequence;
        }
        row["current"] = current;
        row["deleted"] = rev.IsDeleted();
        // Column stores the inverse of hasAttachments.
        row["no_attachments"] = !hasAttachments;
        if (data != null)
        {
            row["json"] = data.ToArray();
        }
        newSequence = StorageEngine.Insert("revs", null, row);
        rev.SetSequence(newSequence);
    }
    catch (Exception e)
    {
        // Best-effort: log and fall through so the caller sees the 0 sentinel.
        Log.E(Tag, "Error inserting revision", e);
    }
    return newSequence;
}
/// <summary>
/// End-to-end attachment test: inserts an attachment on rev1, verifies retrieval and the
/// _attachments metadata dict (stubbed and inlined), copies it to rev2, replaces it on rev3,
/// then checks the blob store contents before and after compaction.
/// </summary>
/// <exception cref="System.Exception"></exception>
public virtual void TestAttachments()
{
    string testAttachmentName = "test_attachment";
    BlobStore attachments = database.GetAttachments();
    NUnit.Framework.Assert.AreEqual(0, attachments.Count());
    NUnit.Framework.Assert.AreEqual(new HashSet<object>(), attachments.AllKeys());
    Status status = new Status();
    IDictionary<string, object> rev1Properties = new Dictionary<string, object>();
    rev1Properties.Put("foo", 1);
    rev1Properties.Put("bar", false);
    RevisionInternal rev1 = database.PutRevision(new RevisionInternal(rev1Properties, database), null, false, status);
    NUnit.Framework.Assert.AreEqual(Status.Created, status.GetCode());
    byte[] attach1 = Sharpen.Runtime.GetBytesForString("This is the body of attach1");
    database.InsertAttachmentForSequenceWithNameAndType(new ByteArrayInputStream(attach1), rev1.GetSequence(), testAttachmentName, "text/plain", rev1.GetGeneration());
    NUnit.Framework.Assert.AreEqual(Status.Created, status.GetCode());
    //We must set the no_attachments column for the rev to false, as we are using an internal
    //private API call above (database.insertAttachmentForSequenceWithNameAndType) which does
    //not set the no_attachments column on revs table
    try
    {
        ContentValues args = new ContentValues();
        // BUG FIX: the column key was "no_attachments=" (stray '='), which does not
        // name the real column; the parallel C# test uses "no_attachments".
        args.Put("no_attachments", false);
        database.GetDatabase().Update("revs", args, "sequence=?", new string[] { rev1.GetSequence().ToString() });
    }
    catch (SQLException e)
    {
        Log.E(Database.Tag, "Error setting rev1 no_attachments to false", e);
        throw new CouchbaseLiteException(Status.InternalServerError);
    }
    Attachment attachment = database.GetAttachmentForSequence(rev1.GetSequence(), testAttachmentName);
    NUnit.Framework.Assert.AreEqual("text/plain", attachment.GetContentType());
    byte[] data = IOUtils.ToByteArray(attachment.GetContent());
    NUnit.Framework.Assert.IsTrue(Arrays.Equals(attach1, data));
    // Expected stubbed-out metadata for the attachment on rev1:
    IDictionary<string, object> innerDict = new Dictionary<string, object>();
    innerDict.Put("content_type", "text/plain");
    innerDict.Put("digest", "sha1-gOHUOBmIMoDCrMuGyaLWzf1hQTE=");
    innerDict.Put("length", 27);
    innerDict.Put("stub", true);
    innerDict.Put("revpos", 1);
    IDictionary<string, object> attachmentDict = new Dictionary<string, object>();
    attachmentDict.Put(testAttachmentName, innerDict);
    IDictionary<string, object> attachmentDictForSequence = database.GetAttachmentsDictForSequenceWithContent(rev1.GetSequence(), EnumSet.NoneOf<Database.TDContentOptions>());
    NUnit.Framework.Assert.AreEqual(attachmentDict, attachmentDictForSequence);
    RevisionInternal gotRev1 = database.GetDocumentWithIDAndRev(rev1.GetDocId(), rev1.GetRevId(), EnumSet.NoneOf<Database.TDContentOptions>());
    IDictionary<string, object> gotAttachmentDict = (IDictionary<string, object>)gotRev1.GetProperties().Get("_attachments");
    NUnit.Framework.Assert.AreEqual(attachmentDict, gotAttachmentDict);
    // Check the attachment dict, with attachments included:
    Sharpen.Collections.Remove(innerDict, "stub");
    innerDict.Put("data", Base64.EncodeBytes(attach1));
    attachmentDictForSequence = database.GetAttachmentsDictForSequenceWithContent(rev1.GetSequence(), EnumSet.Of(Database.TDContentOptions.TDIncludeAttachments));
    NUnit.Framework.Assert.AreEqual(attachmentDict, attachmentDictForSequence);
    gotRev1 = database.GetDocumentWithIDAndRev(rev1.GetDocId(), rev1.GetRevId(), EnumSet.Of(Database.TDContentOptions.TDIncludeAttachments));
    gotAttachmentDict = (IDictionary<string, object>)gotRev1.GetProperties().Get("_attachments");
    NUnit.Framework.Assert.AreEqual(attachmentDict, gotAttachmentDict);
    // Add a second revision that doesn't update the attachment:
    IDictionary<string, object> rev2Properties = new Dictionary<string, object>();
    rev2Properties.Put("_id", rev1.GetDocId());
    rev2Properties.Put("foo", 2);
    rev2Properties.Put("bazz", false);
    RevisionInternal rev2 = database.PutRevision(new RevisionInternal(rev2Properties, database), rev1.GetRevId(), false, status);
    NUnit.Framework.Assert.AreEqual(Status.Created, status.GetCode());
    database.CopyAttachmentNamedFromSequenceToSequence(testAttachmentName, rev1.GetSequence(), rev2.GetSequence());
    // Add a third revision of the same document:
    IDictionary<string, object> rev3Properties = new Dictionary<string, object>();
    rev3Properties.Put("_id", rev2.GetDocId());
    rev3Properties.Put("foo", 2);
    rev3Properties.Put("bazz", false);
    RevisionInternal rev3 = database.PutRevision(new RevisionInternal(rev3Properties, database), rev2.GetRevId(), false, status);
    NUnit.Framework.Assert.AreEqual(Status.Created, status.GetCode());
    byte[] attach2 = Sharpen.Runtime.GetBytesForString("<html>And this is attach2</html>");
    // NOTE(review): rev2.GetGeneration() is passed as revpos for rev3's attachment;
    // the parallel C# port does the same, so it appears intentional — confirm.
    database.InsertAttachmentForSequenceWithNameAndType(new ByteArrayInputStream(attach2), rev3.GetSequence(), testAttachmentName, "text/html", rev2.GetGeneration());
    // Check the 2nd revision's attachment:
    Attachment attachment2 = database.GetAttachmentForSequence(rev2.GetSequence(), testAttachmentName);
    NUnit.Framework.Assert.AreEqual("text/plain", attachment2.GetContentType());
    data = IOUtils.ToByteArray(attachment2.GetContent());
    NUnit.Framework.Assert.IsTrue(Arrays.Equals(attach1, data));
    // Check the 3rd revision's attachment:
    Attachment attachment3 = database.GetAttachmentForSequence(rev3.GetSequence(), testAttachmentName);
    NUnit.Framework.Assert.AreEqual("text/html", attachment3.GetContentType());
    data = IOUtils.ToByteArray(attachment3.GetContent());
    NUnit.Framework.Assert.IsTrue(Arrays.Equals(attach2, data));
    IDictionary<string, object> attachmentDictForRev3 = (IDictionary<string, object>)database.GetAttachmentsDictForSequenceWithContent(rev3.GetSequence(), EnumSet.NoneOf<Database.TDContentOptions>()).Get(testAttachmentName);
    if (attachmentDictForRev3.ContainsKey("follows"))
    {
        if (((bool)attachmentDictForRev3.Get("follows")) == true)
        {
            throw new RuntimeException("Did not expected attachment dict 'follows' key to be true");
        }
        else
        {
            throw new RuntimeException("Did not expected attachment dict to have 'follows' key");
        }
    }
    // Examine the attachment store:
    NUnit.Framework.Assert.AreEqual(2, attachments.Count());
    ICollection<BlobKey> expected = new HashSet<BlobKey>();
    expected.AddItem(BlobStore.KeyForBlob(attach1));
    expected.AddItem(BlobStore.KeyForBlob(attach2));
    NUnit.Framework.Assert.AreEqual(expected, attachments.AllKeys());
    database.Compact();
    // This clears the body of the first revision
    NUnit.Framework.Assert.AreEqual(1, attachments.Count());
    ICollection<BlobKey> expected2 = new HashSet<BlobKey>();
    expected2.AddItem(BlobStore.KeyForBlob(attach2));
    NUnit.Framework.Assert.AreEqual(expected2, attachments.AllKeys());
}
/// <summary>
/// Incrementally re-indexes this view inside a transaction: deletes map rows made obsolete
/// by replaced revisions, then runs the map function over every revision added since the
/// last indexing (winning revisions only) and records the new last-indexed sequence.
/// </summary>
internal void UpdateIndex()
{
    Log.V(Database.Tag, "Re-indexing view {0} ...", Name);
    System.Diagnostics.Debug.Assert((Map != null));
    if (Id <= 0)
    {
        var msg = string.Format("View.Id <= 0");
        throw new CouchbaseLiteException(msg, new Status(StatusCode.NotFound));
    }
    Database.BeginTransaction();
    var result = new Status(StatusCode.InternalServerError);
    Cursor cursor = null;
    try
    {
        var lastSequence = LastSequenceIndexed;
        var dbMaxSequence = Database.LastSequenceNumber;
        if (lastSequence == dbMaxSequence)
        {
            // nothing to do (eg, kCBLStatusNotModified)
            Log.V(Database.Tag, "lastSequence ({0}) == dbMaxSequence ({1}), nothing to do", lastSequence, dbMaxSequence);
            result.SetCode(StatusCode.NotModified);
            return;
        }
        // First remove obsolete emitted results from the 'maps' table:
        var sequence = lastSequence;
        if (lastSequence < 0)
        {
            var msg = string.Format("lastSequence < 0 ({0})", lastSequence);
            throw new CouchbaseLiteException(msg, new Status(StatusCode.InternalServerError));
        }
        if (lastSequence == 0)
        {
            // If the lastSequence has been reset to 0, make sure to remove
            // any leftover rows:
            var whereArgs = new string[] { Id.ToString() };
            Database.StorageEngine.Delete("maps", "view_id=?", whereArgs);
        }
        else
        {
            // Delete all obsolete map results (ones from since-replaced
            // revisions):
            var args = new [] { Id.ToString(), lastSequence.ToString(), lastSequence.ToString() };
            Database.StorageEngine.ExecSQL(
                "DELETE FROM maps WHERE view_id=? AND sequence IN ("
                + "SELECT parent FROM revs WHERE sequence>? "
                + "AND parent>0 AND parent<=?)", args);
        }
        var deleted = 0;
        // SQLite changes() reports rows affected by the DELETE above.
        cursor = Database.StorageEngine.RawQuery("SELECT changes()");
        cursor.MoveToNext();
        deleted = cursor.GetInt(0);
        cursor.Close();
        // Find a better way to propagate this back
        // Now scan every revision added since the last time the view was indexed:
        var selectArgs = new[] { lastSequence.ToString() };
        cursor = Database.StorageEngine.RawQuery("SELECT revs.doc_id, sequence, docid, revid, json, no_attachments FROM revs, docs "
            + "WHERE sequence>? AND current!=0 AND deleted=0 "
            + "AND revs.doc_id = docs.doc_id "
            + "ORDER BY revs.doc_id, revid DESC", CommandBehavior.SequentialAccess, selectArgs);
        var lastDocID = 0L;
        var keepGoing = cursor.MoveToNext();
        while (keepGoing)
        {
            long docID = cursor.GetLong(0);
            if (docID != lastDocID)
            {
                // Only look at the first-iterated revision of any document,
                // because this is the one with the highest revid, hence the
                // "winning" revision of a conflict.
                lastDocID = docID;
                // Reconstitute the document as a dictionary:
                sequence = cursor.GetLong(1);
                string docId = cursor.GetString(2);
                if (docId.StartsWith("_design/", StringComparison.InvariantCultureIgnoreCase))
                {
                    // design docs don't get indexed!
                    keepGoing = cursor.MoveToNext();
                    continue;
                }
                var revId = cursor.GetString(3);
                var json = cursor.GetBlob(4);
                var noAttachments = cursor.GetInt(5) > 0;
                // Skip rows with the same doc_id -- these are losing conflicts.
                while ((keepGoing = cursor.MoveToNext()) && cursor.GetLong(0) == docID)
                {
                }
                if (lastSequence > 0)
                {
                    // Find conflicts with documents from previous indexings.
                    var selectArgs2 = new[] { Convert.ToString(docID), Convert.ToString(lastSequence) };
                    var cursor2 = Database.StorageEngine.RawQuery("SELECT revid, sequence FROM revs "
                        + "WHERE doc_id=? AND sequence<=? AND current!=0 AND deleted=0 "
                        + "ORDER BY revID DESC " + "LIMIT 1", selectArgs2);
                    // BUG FIX: cursor2 was never closed, leaking one cursor per
                    // conflicted document; close it in a finally block.
                    try
                    {
                        if (cursor2.MoveToNext())
                        {
                            var oldRevId = cursor2.GetString(0);
                            // This is the revision that used to be the 'winner'.
                            // Remove its emitted rows:
                            var oldSequence = cursor2.GetLong(1);
                            var args = new[] { Sharpen.Extensions.ToString(Id), Convert.ToString(oldSequence) };
                            Database.StorageEngine.ExecSQL("DELETE FROM maps WHERE view_id=? AND sequence=?", args);
                            if (RevisionInternal.CBLCompareRevIDs(oldRevId, revId) > 0)
                            {
                                // It still 'wins' the conflict, so it's the one that
                                // should be mapped [again], not the current revision!
                                revId = oldRevId;
                                sequence = oldSequence;
                                var selectArgs3 = new[] { Convert.ToString(sequence) };
                                json = Misc.ByteArrayResultForQuery(Database.StorageEngine, "SELECT json FROM revs WHERE sequence=?", selectArgs3);
                            }
                        }
                    }
                    finally
                    {
                        cursor2.Close();
                    }
                }
                // Get the document properties, to pass to the map function:
                var contentOptions = DocumentContentOptions.None;
                if (noAttachments)
                {
                    contentOptions |= DocumentContentOptions.NoAttachments;
                }
                // BUG FIX: contentOptions was computed above but DocumentContentOptions.None
                // was passed here, leaving the NoAttachments flag dead.
                var properties = Database.DocumentPropertiesFromJSON(json, docId, revId, false, sequence, contentOptions);
                if (properties != null)
                {
                    // Call the user-defined map() to emit new key/value pairs from
                    // this revision. This is the emit() block, which gets called
                    // from within the user-defined map() block below.
                    var enclosingView = this;
                    var thisSequence = sequence;
                    var map = Map;
                    if (map == null)
                        throw new CouchbaseLiteException("Map function is missing.");
                    EmitDelegate emitBlock = (key, value) =>
                    {
                        try
                        {
                            var keyJson = Manager.GetObjectMapper().WriteValueAsString(key);
                            var valueJson = value == null ? null : Manager.GetObjectMapper().WriteValueAsString(value);
                            var insertValues = new ContentValues();
                            insertValues.Put("view_id", enclosingView.Id);
                            insertValues["sequence"] = thisSequence;
                            insertValues["key"] = keyJson;
                            insertValues["value"] = valueJson;
                            enclosingView.Database.StorageEngine.Insert("maps", null, insertValues);
                            // Issue #81 workaround: another thread may insert revisions
                            // concurrently, so dbMaxSequence captured above can go stale and
                            // desync the last-indexed sequence from the obsolete-map cleanup,
                            // duplicating documents in the index. Keep it current here.
                            if (thisSequence > dbMaxSequence)
                            {
                                dbMaxSequence = thisSequence;
                            }
                        }
                        catch (Exception e)
                        {
                            Log.E(Database.Tag, "Error emitting", e);
                        }
                    };
                    map(properties, emitBlock);
                }
            }
        }
        // Finally, record the last revision sequence number that was indexed:
        var updateValues = new ContentValues();
        updateValues["lastSequence"] = dbMaxSequence;
        var whereArgs_1 = new string[] { Id.ToString() };
        Database.StorageEngine.Update("views", updateValues, "view_id=?", whereArgs_1);
        // FIXME actually count number added :)
        Log.V(Database.Tag, "...Finished re-indexing view {0} up to sequence {1} (deleted {2} added ?)", Name, Convert.ToString(dbMaxSequence), deleted);
        result.SetCode(StatusCode.Ok);
    }
    catch (Exception e)
    {
        throw new CouchbaseLiteException(e, new Status(StatusCode.DbError));
    }
    finally
    {
        if (cursor != null)
        {
            cursor.Close();
        }
        if (!result.IsSuccessful)
        {
            Log.W(Database.Tag, "Failed to rebuild view {0}:{1}", Name, result.GetCode());
        }
        if (Database != null)
        {
            // Commits on Ok/NotModified, rolls back otherwise.
            Database.EndTransaction(result.IsSuccessful);
        }
    }
}
/// <summary>
/// Records an attachment's metadata row in the 'attachments' table for the given revision
/// sequence. Throws CouchbaseLiteException(InternalServerError) if the insert fails.
/// </summary>
public void InsertAttachmentForSequenceWithNameAndType(long sequence, string name, string contentType, int revpos, BlobKey key)
{
    try
    {
        var row = new ContentValues();
        row.Put("sequence", sequence);
        row.Put("filename", name);
        if (key != null)
        {
            // Only stored blobs get a key and a length; a null key leaves both columns unset.
            row.Put("key", key.GetBytes());
            row.Put("length", attachments.GetSizeOfBlob(key));
        }
        row.Put("type", contentType);
        row.Put("revpos", revpos);
        var rowId = database.Insert("attachments", null, row);
        if (rowId == -1)
        {
            var msg = "Insert attachment failed (returned -1)";
            Log.E(Database.Tag, msg);
            throw new CouchbaseLiteException(msg, Status.InternalServerError);
        }
    }
    catch (SQLException e)
    {
        Log.E(Database.Tag, "Error inserting attachment", e);
        throw new CouchbaseLiteException(e, Status.InternalServerError);
    }
}
/// <summary>
/// Persists one emitted key/value pair into the 'maps' table for the enclosing view.
/// Serialization or insert failures are logged and swallowed.
/// </summary>
public override void Emit(object key, object value)
{
    try
    {
        var keyJson = Manager.GetObjectMapper().WriteValueAsString(key);
        // A null value is stored as a NULL column rather than the JSON string "null".
        var valueJson = value == null ? null : Manager.GetObjectMapper().WriteValueAsString(value);
        var row = new ContentValues();
        row.Put("view_id", this._enclosing.GetViewId());
        row.Put("sequence", this.sequence);
        row.Put("key", keyJson);
        row.Put("value", valueJson);
        this._enclosing.database.GetDatabase().Insert("maps", null, row);
    }
    catch (Exception e)
    {
        Log.E(Log.TagView, "Error emitting", e);
    }
}
/// <summary>
/// End-to-end attachment test (idiomatic C# port of the Sharpen test above): inserts an
/// attachment on rev1, verifies retrieval and the _attachments metadata (stub and inline
/// forms), copies it to rev2, replaces it on rev3, and checks the blob store before and
/// after compaction.
/// </summary>
public void TestAttachments()
{
    var testAttachmentName = "test_attachment";
    var attachments = database.Attachments;
    Assert.AreEqual(0, attachments.Count());
    Assert.AreEqual(0, attachments.AllKeys().Count());
    // Create rev1 with a plain body:
    var rev1Properties = new Dictionary<string, object>();
    rev1Properties["foo"] = 1;
    rev1Properties["bar"] = false;
    var status = new Status();
    var rev1 = database.PutRevision(new RevisionInternal(rev1Properties, database), null, false, status);
    Assert.AreEqual(StatusCode.Created, status.GetCode());
    var attach1 = Runtime.GetBytesForString("This is the body of attach1").ToArray();
    database.InsertAttachmentForSequenceWithNameAndType(new ByteArrayInputStream(attach1), rev1.GetSequence(), testAttachmentName, "text/plain", rev1.GetGeneration());
    //We must set the no_attachments column for the rev to false, as we are using an internal
    //private API call above (database.insertAttachmentForSequenceWithNameAndType) which does
    //not set the no_attachments column on revs table
    try
    {
        var args = new ContentValues();
        args.Put("no_attachments", false);
        database.StorageEngine.Update("revs", args, "sequence=?", new[] { rev1.GetSequence().ToString() });
    }
    catch (SQLException e)
    {
        Log.E(Tag, "Error setting rev1 no_attachments to false", e);
        throw new CouchbaseLiteException(StatusCode.InternalServerError);
    }
    // Retrieve the attachment and verify its type and bytes:
    var attachment = database.GetAttachmentForSequence(rev1.GetSequence(), testAttachmentName);
    Assert.AreEqual("text/plain", attachment.ContentType);
    var data = attachment.Content.ToArray();
    Assert.IsTrue(Arrays.Equals(attach1, data));
    // Workaround : Not closing the content stream will cause Sharing Violation
    // Exception when trying to get the same attachment going forward.
    attachment.ContentStream.Close();
    // Expected stubbed-out metadata for the attachment on rev1:
    var innerDict = new Dictionary<string, object>();
    innerDict["content_type"] = "text/plain";
    innerDict["digest"] = "sha1-gOHUOBmIMoDCrMuGyaLWzf1hQTE=";
    innerDict["length"] = 27;
    innerDict["stub"] = true;
    innerDict["revpos"] = 1;
    var attachmentDict = new Dictionary<string, object>();
    attachmentDict[testAttachmentName] = innerDict;
    var attachmentDictForSequence = database.GetAttachmentsDictForSequenceWithContent(rev1.GetSequence(), DocumentContentOptions.None);
    // Compare via SortedDictionary so key order doesn't affect equality:
    Assert.AreEqual(new SortedDictionary<string,object>(attachmentDict), new SortedDictionary<string,object>(attachmentDictForSequence));//Assert.AreEqual(1, attachmentDictForSequence.Count);
    var gotRev1 = database.GetDocumentWithIDAndRev(rev1.GetDocId(), rev1.GetRevId(), DocumentContentOptions.IncludeAttachments);
    var gotAttachmentDict = gotRev1.GetProperties()
        .Get("_attachments")
        .AsDictionary<string,object>();
    // Only the key sets are compared here (values differ between stub and inline forms):
    Assert.AreEqual(attachmentDict.Select(kvp => kvp.Key).OrderBy(k => k), gotAttachmentDict.Select(kvp => kvp.Key).OrderBy(k => k));
    // Check the attachment dict, with attachments included:
    innerDict.Remove("stub");
    innerDict.Put("data", Convert.ToBase64String(attach1));
    attachmentDictForSequence = database.GetAttachmentsDictForSequenceWithContent(rev1.GetSequence(), DocumentContentOptions.IncludeAttachments);
    Assert.AreEqual(new SortedDictionary<string,object>(attachmentDict[testAttachmentName].AsDictionary<string,object>()), new SortedDictionary<string,object>(attachmentDictForSequence[testAttachmentName].AsDictionary<string,object>()));
    gotRev1 = database.GetDocumentWithIDAndRev(rev1.GetDocId(), rev1.GetRevId(), DocumentContentOptions.IncludeAttachments);
    gotAttachmentDict = gotRev1.GetProperties()
        .Get("_attachments")
        .AsDictionary<string, object>()
        .Get(testAttachmentName)
        .AsDictionary<string,object>();
    Assert.AreEqual(innerDict.Select(kvp => kvp.Key).OrderBy(k => k), gotAttachmentDict.Select(kvp => kvp.Key).OrderBy(k => k));
    // Add a second revision that doesn't update the attachment:
    var rev2Properties = new Dictionary<string, object>();
    rev2Properties.Put("_id", rev1.GetDocId());
    rev2Properties["foo"] = 2;
    rev2Properties["bazz"] = false;
    var rev2 = database.PutRevision(new RevisionInternal(rev2Properties, database), rev1.GetRevId(), false, status);
    Assert.AreEqual(StatusCode.Created, status.GetCode());
    database.CopyAttachmentNamedFromSequenceToSequence(testAttachmentName, rev1.GetSequence(), rev2.GetSequence());
    // Add a third revision of the same document:
    var rev3Properties = new Dictionary<string, object>();
    rev3Properties.Put("_id", rev2.GetDocId());
    rev3Properties["foo"] = 2;
    rev3Properties["bazz"] = false;
    var rev3 = database.PutRevision(new RevisionInternal(rev3Properties, database), rev2.GetRevId(), false, status);
    Assert.AreEqual(StatusCode.Created, status.GetCode());
    var attach2 = Runtime.GetBytesForString("<html>And this is attach2</html>").ToArray();
    // NOTE(review): rev2.GetGeneration() is passed as revpos for rev3's attachment;
    // the Sharpen original does the same, so it appears intentional — confirm.
    database.InsertAttachmentForSequenceWithNameAndType(new ByteArrayInputStream(attach2), rev3.GetSequence(), testAttachmentName, "text/html", rev2.GetGeneration());
    // Check the 2nd revision's attachment:
    var attachment2 = database.GetAttachmentForSequence(rev2.GetSequence(), testAttachmentName);
    Assert.AreEqual("text/plain", attachment2.ContentType);
    data = attachment2.Content.ToArray();
    Assert.IsTrue(Arrays.Equals(attach1, data));
    // Workaround : Not closing the content stream will cause Sharing Violation
    // Exception when trying to get the same attachment going forward.
    attachment2.ContentStream.Close();
    // Check the 3rd revision's attachment:
    var attachment3 = database.GetAttachmentForSequence(rev3.GetSequence(), testAttachmentName);
    Assert.AreEqual("text/html", attachment3.ContentType);
    data = attachment3.Content.ToArray();
    Assert.IsTrue(Arrays.Equals(attach2, data));
    var attachmentDictForRev3 = database.GetAttachmentsDictForSequenceWithContent(rev3.GetSequence(), DocumentContentOptions.None)
        .Get(testAttachmentName)
        .AsDictionary<string,object>();
    // The 'follows' key must not appear (and certainly not be true) in a stub dict:
    if (attachmentDictForRev3.ContainsKey("follows"))
    {
        if (((bool)attachmentDictForRev3.Get("follows")) == true)
        {
            throw new RuntimeException("Did not expected attachment dict 'follows' key to be true");
        }
        else
        {
            throw new RuntimeException("Did not expected attachment dict to have 'follows' key");
        }
    }
    // Workaround : Not closing the content stream will cause Sharing Violation
    // Exception when trying to get the same attachment going forward.
    attachment3.ContentStream.Close();
    // Examine the attachment store:
    Assert.AreEqual(2, attachments.Count());
    var expected = new HashSet<BlobKey>();
    expected.AddItem(BlobStore.KeyForBlob(attach1));
    expected.AddItem(BlobStore.KeyForBlob(attach2));
    Assert.AreEqual(expected.Count, attachments.AllKeys().Count());
    foreach(var key in attachments.AllKeys())
    {
        Assert.IsTrue(expected.Contains(key));
    }
    database.Compact();
    // This clears the body of the first revision
    Assert.AreEqual(1, attachments.Count());
    var expected2 = new HashSet<BlobKey>();
    expected2.AddItem(BlobStore.KeyForBlob(attach2));
    Assert.AreEqual(expected2.Count, attachments.AllKeys().Count());
    foreach(var key in attachments.AllKeys())
    {
        Assert.IsTrue(expected2.Contains(key));
    }
}
public void UpdateIndex() { Log.V(Log.TagView, "Re-indexing view: %s", name); System.Diagnostics.Debug.Assert((mapBlock != null)); if (GetViewId() <= 0) { string msg = string.Format("getViewId() < 0"); throw new CouchbaseLiteException(msg, new Status(Status.NotFound)); } database.BeginTransaction(); Status result = new Status(Status.InternalServerError); Cursor cursor = null; try { long lastSequence = GetLastSequenceIndexed(); long dbMaxSequence = database.GetLastSequenceNumber(); if (lastSequence == dbMaxSequence) { // nothing to do (eg, kCBLStatusNotModified) Log.V(Log.TagView, "lastSequence (%s) == dbMaxSequence (%s), nothing to do", lastSequence , dbMaxSequence); result.SetCode(Status.NotModified); return; } // First remove obsolete emitted results from the 'maps' table: long sequence = lastSequence; if (lastSequence < 0) { string msg = string.Format("lastSequence < 0 (%s)", lastSequence); throw new CouchbaseLiteException(msg, new Status(Status.InternalServerError)); } if (lastSequence == 0) { // If the lastSequence has been reset to 0, make sure to remove // any leftover rows: string[] whereArgs = new string[] { Sharpen.Extensions.ToString(GetViewId()) }; database.GetDatabase().Delete("maps", "view_id=?", whereArgs); } else { // Delete all obsolete map results (ones from since-replaced // revisions): string[] args = new string[] { Sharpen.Extensions.ToString(GetViewId()), System.Convert.ToString (lastSequence), System.Convert.ToString(lastSequence) }; database.GetDatabase().ExecSQL("DELETE FROM maps WHERE view_id=? AND sequence IN (" + "SELECT parent FROM revs WHERE sequence>? " + "AND parent>0 AND parent<=?)", args); } int deleted = 0; cursor = database.GetDatabase().RawQuery("SELECT changes()", null); cursor.MoveToNext(); deleted = cursor.GetInt(0); cursor.Close(); // This is the emit() block, which gets called from within the // user-defined map() block // that's called down below. 
AbstractTouchMapEmitBlock emitBlock = new _AbstractTouchMapEmitBlock_428(this); //Log.v(Log.TAG_VIEW, " emit(" + keyJson + ", " // + valueJson + ")"); // find a better way to propagate this back // Now scan every revision added since the last time the view was // indexed: string[] selectArgs = new string[] { System.Convert.ToString(lastSequence) }; cursor = database.GetDatabase().RawQuery("SELECT revs.doc_id, sequence, docid, revid, json, no_attachments FROM revs, docs " + "WHERE sequence>? AND current!=0 AND deleted=0 " + "AND revs.doc_id = docs.doc_id " + "ORDER BY revs.doc_id, revid DESC", selectArgs); long lastDocID = 0; bool keepGoing = cursor.MoveToNext(); while (keepGoing) { long docID = cursor.GetLong(0); if (docID != lastDocID) { // Only look at the first-iterated revision of any document, // because this is the // one with the highest revid, hence the "winning" revision // of a conflict. lastDocID = docID; // Reconstitute the document as a dictionary: sequence = cursor.GetLong(1); string docId = cursor.GetString(2); if (docId.StartsWith("_design/")) { // design docs don't get indexed! keepGoing = cursor.MoveToNext(); continue; } string revId = cursor.GetString(3); byte[] json = cursor.GetBlob(4); bool noAttachments = cursor.GetInt(5) > 0; while ((keepGoing = cursor.MoveToNext()) && cursor.GetLong(0) == docID) { } // Skip rows with the same doc_id -- these are losing conflicts. if (lastSequence > 0) { // Find conflicts with documents from previous indexings. string[] selectArgs2 = new string[] { System.Convert.ToString(docID), System.Convert.ToString (lastSequence) }; Cursor cursor2 = database.GetDatabase().RawQuery("SELECT revid, sequence FROM revs " + "WHERE doc_id=? AND sequence<=? AND current!=0 AND deleted=0 " + "ORDER BY revID DESC " + "LIMIT 1", selectArgs2); if (cursor2.MoveToNext()) { string oldRevId = cursor2.GetString(0); // This is the revision that used to be the 'winner'. 
// Remove its emitted rows: long oldSequence = cursor2.GetLong(1); string[] args = new string[] { Sharpen.Extensions.ToString(GetViewId()), System.Convert.ToString (oldSequence) }; database.GetDatabase().ExecSQL("DELETE FROM maps WHERE view_id=? AND sequence=?", args); if (RevisionInternal.CBLCompareRevIDs(oldRevId, revId) > 0) { // It still 'wins' the conflict, so it's the one that // should be mapped [again], not the current revision! revId = oldRevId; sequence = oldSequence; string[] selectArgs3 = new string[] { System.Convert.ToString(sequence) }; json = Utils.ByteArrayResultForQuery(database.GetDatabase(), "SELECT json FROM revs WHERE sequence=?" , selectArgs3); } } } // Get the document properties, to pass to the map function: EnumSet<Database.TDContentOptions> contentOptions = EnumSet.NoneOf<Database.TDContentOptions >(); if (noAttachments) { contentOptions.AddItem(Database.TDContentOptions.TDNoAttachments); } IDictionary<string, object> properties = database.DocumentPropertiesFromJSON(json , docId, revId, false, sequence, contentOptions); if (properties != null) { // Call the user-defined map() to emit new key/value // pairs from this revision: emitBlock.SetSequence(sequence); mapBlock.Map(properties, emitBlock); } } } // Finally, record the last revision sequence number that was // indexed: ContentValues updateValues = new ContentValues(); updateValues.Put("lastSequence", dbMaxSequence); string[] whereArgs_1 = new string[] { Sharpen.Extensions.ToString(GetViewId()) }; database.GetDatabase().Update("views", updateValues, "view_id=?", whereArgs_1); // FIXME actually count number added :) Log.V(Log.TagView, "Finished re-indexing view: %s " + " up to sequence %s" + " (deleted %s added ?)" , name, dbMaxSequence, deleted); result.SetCode(Status.Ok); } catch (SQLException e) { throw new CouchbaseLiteException(e, new Status(Status.DbError)); } finally { if (cursor != null) { cursor.Close(); } if (!result.IsSuccessful()) { Log.W(Log.TagView, "Failed to rebuild view 
%s. Result code: %d", name, result.GetCode ()); } if (database != null) { database.EndTransaction(result.IsSuccessful()); } } }
/// <summary>
/// Deletes this view's index: drops all emitted map rows and resets the
/// indexing checkpoint so a later UpdateIndex rebuilds from scratch.
/// </summary>
public void DeleteIndex()
{
    // Nothing to delete if the view was never persisted.
    if (GetViewId() < 0)
    {
        return;
    }
    var committed = false;
    try
    {
        database.BeginTransaction();
        var viewIdArg = new string[] { Sharpen.Extensions.ToString(GetViewId()) };
        // Remove every emitted row belonging to this view...
        database.GetDatabase().Delete("maps", "view_id=?", viewIdArg);
        // ...and rewind the checkpoint to sequence 0.
        var resetValues = new ContentValues();
        resetValues.Put("lastSequence", 0);
        database.GetDatabase().Update("views", resetValues, "view_id=?", viewIdArg);
        committed = true;
    }
    catch (SQLException e)
    {
        Log.E(Log.TagView, "Error removing index", e);
    }
    finally
    {
        database.EndTransaction(committed);
    }
}
/// <summary>
/// Registers the map (and optional reduce) delegates for this view and
/// persists the version string. Returns true when the stored version changed
/// (or the view row was just created), meaning the index must be rebuilt.
/// </summary>
public bool SetMapReduce(Mapper mapBlock, Reducer reduceBlock, string version)
{
    System.Diagnostics.Debug.Assert((mapBlock != null));
    System.Diagnostics.Debug.Assert((version != null));
    this.mapBlock = mapBlock;
    this.reduceBlock = reduceBlock;
    if (!database.Open())
    {
        return false;
    }
    // Persisting the version is written defensively: the row may not exist
    // yet, and we avoid touching the database when the version is unchanged.
    // (Older Android lacks a reliable INSERT OR IGNORE, hence the two steps.)
    // FIXME review need for change to execSQL, manual call to changes()
    SQLiteStorageEngine storageEngine = this.database.GetDatabase();
    Cursor cursor = null;
    try
    {
        cursor = storageEngine.RawQuery("SELECT name, version FROM views WHERE name=?", new string[] { name });
        if (!cursor.MoveToNext())
        {
            // First time this view is saved: create its row.
            var newRow = new ContentValues();
            newRow.Put("name", name);
            newRow.Put("version", version);
            storageEngine.Insert("views", null, newRow);
            return true;
        }
        // Row exists: bump the version (and reset the index checkpoint)
        // only when the stored version actually differs.
        var changes = new ContentValues();
        changes.Put("version", version);
        changes.Put("lastSequence", 0);
        var rowsAffected = storageEngine.Update("views", changes, "name=? AND version!=?",
            new string[] { name, version });
        return (rowsAffected > 0);
    }
    catch (SQLException e)
    {
        Log.E(Log.TagView, "Error setting map block", e);
        return false;
    }
    finally
    {
        if (cursor != null)
        {
            cursor.Close();
        }
    }
}
/// <summary>
/// Upserts the replication checkpoint row for the given remote/push pair,
/// recording the last sequence that was replicated.
/// </summary>
/// <returns>True when the row was written successfully, false on failure.</returns>
internal Boolean SetLastSequence(String lastSequence, String checkpointId, Boolean push)
{
    var values = new ContentValues();
    values.Put("remote", checkpointId);
    values["push"] = push;
    values["last_sequence"] = lastSequence;
    // Replace strategy makes this an upsert keyed on the table's conflict target.
    var newId = StorageEngine.InsertWithOnConflict("replicators", null, values, ConflictResolutionStrategy.Replace);
    Log.D(Tag, "Set Last Sequence: {0}: {1} / {2}".Fmt(lastSequence, checkpointId, newId));
    // FIX: InsertWithOnConflict returns -1 on error; the previous code
    // returned (newId == -1), i.e. reported success exactly when the
    // write had failed.
    return (newId != -1);
}
/// <summary>
/// Compacts the database. Revision-tree history forbids deleting rows
/// outright, so instead the JSON bodies of non-current revisions (most of
/// the space) are nulled out, orphaned attachments are garbage-collected,
/// and the SQLite file is vacuumed.
/// </summary>
/// <exception cref="CouchbaseLiteException">On SQL or attachment-GC failure.</exception>
public void Compact()
{
    try
    {
        Log.V(Couchbase.Lite.Database.Tag, "Pruning old revisions...");
        PruneRevsToMaxDepth(0);
        Log.V(Couchbase.Lite.Database.Tag, "Deleting JSON of old revisions...");
        var clearJson = new ContentValues();
        clearJson.Put("json", (string)null);
        database.Update("revs", clearJson, "current=0", null);
    }
    catch (SQLException e)
    {
        Log.E(Couchbase.Lite.Database.Tag, "Error compacting", e);
        throw new CouchbaseLiteException(Status.InternalServerError);
    }
    Log.V(Couchbase.Lite.Database.Tag, "Deleting old attachments...");
    var gcResult = GarbageCollectAttachments();
    if (!gcResult.IsSuccessful())
    {
        throw new CouchbaseLiteException(gcResult);
    }
    Log.V(Couchbase.Lite.Database.Tag, "Vacuuming SQLite sqliteDb...");
    try
    {
        database.ExecSQL("VACUUM");
    }
    catch (SQLException e)
    {
        Log.E(Couchbase.Lite.Database.Tag, "Error vacuuming sqliteDb", e);
        throw new CouchbaseLiteException(Status.InternalServerError);
    }
}
/// <summary>
/// Inserts a row into the 'attachments' table linking the named attachment
/// to the given revision sequence.
/// </summary>
/// <exception cref="Couchbase.Lite.CouchbaseLiteException">When the insert fails.</exception>
internal void InsertAttachmentForSequenceWithNameAndType(long sequence, string name, string contentType, int revpos, BlobKey key)
{
    try
    {
        var row = new ContentValues();
        row["sequence"] = sequence;
        row["filename"] = name;
        // Only record the blob key and its stored length when a key was
        // supplied; a null key leaves those columns unset.
        if (key != null)
        {
            row.Put("key", key.GetBytes());
            row.Put("length", Attachments.GetSizeOfBlob(key));
        }
        row["type"] = contentType;
        row["revpos"] = revpos;
        if (StorageEngine.Insert("attachments", null, row) == -1)
        {
            var msg = "Insert attachment failed (returned -1)";
            Log.E(Tag, msg);
            throw new CouchbaseLiteException(msg, StatusCode.InternalServerError);
        }
    }
    catch (SQLException e)
    {
        Log.E(Tag, "Error inserting attachment", e);
        throw new CouchbaseLiteException(StatusCode.InternalServerError);
    }
}
/// <summary>
/// Inserts a new row into the 'docs' table for the given document id.
/// </summary>
/// <returns>The new row id, or -1 when the insert failed.</returns>
public long InsertDocumentID(string docId)
{
    long newRowId = -1;
    try
    {
        var values = new ContentValues();
        values.Put("docid", docId);
        newRowId = database.Insert("docs", null, values);
    }
    catch (Exception e)
    {
        // Best-effort: failures are logged and signalled via the -1 return.
        Log.E(Database.Tag, "Error inserting document id", e);
    }
    return newRowId;
}
/// <summary>
/// Re-indexes this view: clears obsolete map rows, runs the user-supplied
/// Map delegate over every winning revision added since the last indexing,
/// and records the new checkpoint sequence in the 'views' table.
/// </summary>
/// <exception cref="CouchbaseLiteException">On missing view id, missing Map, or SQL errors.</exception>
internal void UpdateIndex()
{
    Log.V(Database.Tag, "Re-indexing view " + Name + " ...");
    System.Diagnostics.Debug.Assert((Map != null));
    if (Id < 0)
    {
        var msg = string.Format("View.Id < 0");
        throw new CouchbaseLiteException(msg, new Status(StatusCode.NotFound));
    }
    Database.BeginTransaction();
    var result = new Status(StatusCode.InternalServerError);
    Cursor cursor = null;
    try
    {
        var lastSequence = LastSequenceIndexed;
        var dbMaxSequence = Database.LastSequenceNumber;
        if (lastSequence == dbMaxSequence)
        {
            // nothing to do (eg, kCBLStatusNotModified)
            var msg = String.Format("lastSequence ({0}) == dbMaxSequence ({1}), nothing to do",
                lastSequence, dbMaxSequence);
            Log.D(Database.Tag, msg);
            result.SetCode(StatusCode.Ok);
            return;
        }
        // First remove obsolete emitted results from the 'maps' table:
        var sequence = lastSequence;
        if (lastSequence < 0)
        {
            var msg = string.Format("lastSequence < 0 ({0})", lastSequence);
            throw new CouchbaseLiteException(msg, new Status(StatusCode.InternalServerError));
        }
        if (lastSequence == 0)
        {
            // If the lastSequence has been reset to 0, make sure to remove any leftover rows:
            var whereArgs = new string[] { Sharpen.Extensions.ToString(Id) };
            Database.StorageEngine.Delete("maps", "view_id=@", whereArgs);
        }
        else
        {
            // Delete all obsolete map results (ones from since-replaced revisions):
            var args = new [] { Id.ToString(), lastSequence.ToString(), lastSequence.ToString() };
            Database.StorageEngine.ExecSQL(
                "DELETE FROM maps WHERE view_id=@ AND sequence IN ("
                + "SELECT parent FROM revs WHERE sequence>@ "
                + "AND parent>0 AND parent<=@)", args);
        }
        var deleted = 0;
        cursor = Database.StorageEngine.RawQuery("SELECT changes()", null); // TODO: Convert to ADO params.
        cursor.MoveToNext();
        deleted = cursor.GetInt(0);
        cursor.Close();
        // Now scan every revision added since the last time the view was indexed:
        var selectArgs = new[] { Convert.ToString(lastSequence) };
        cursor = Database.StorageEngine.RawQuery(
            "SELECT revs.doc_id, sequence, docid, revid, json FROM revs, docs "
            + "WHERE sequence>@ AND current!=0 AND deleted=0 "
            + "AND revs.doc_id = docs.doc_id "
            + "ORDER BY revs.doc_id, revid DESC", CommandBehavior.SequentialAccess, selectArgs);
        cursor.MoveToNext();
        var lastDocID = 0L;
        while (!cursor.IsAfterLast())
        {
            long docID = cursor.GetLong(0);
            if (docID != lastDocID)
            {
                // Only look at the first-iterated revision of any document,
                // because this is the one with the highest revid, hence the
                // "winning" revision of a conflict.
                lastDocID = docID;
                // Reconstitute the document as a dictionary:
                sequence = cursor.GetLong(1);
                string docId = cursor.GetString(2);
                // FIX: the original used StringCompare.IgnoreCase, which is not a
                // .NET type (compile error); String.StartsWith takes a
                // StringComparison value.
                if (docId.StartsWith("_design/", StringComparison.OrdinalIgnoreCase))
                {
                    // design docs don't get indexed!
                    cursor.MoveToNext();
                    continue;
                }
                var revId = cursor.GetString(3);
                var json = cursor.GetBlob(4);
                var properties = Database.DocumentPropertiesFromJSON(
                    json, docId, revId, false, sequence, EnumSet.NoneOf<TDContentOptions>());
                if (properties != null)
                {
                    // Call the user-defined map() to emit new key/value pairs from this revision:
                    Log.V(Database.Tag, " call map for sequence=" + System.Convert.ToString(sequence));
                    // This is the emit() block, which gets called from within the
                    // user-defined map() block that's called down below.
                    var enclosingView = this;
                    var thisSequence = sequence;
                    var map = Map;
                    if (map == null)
                        throw new CouchbaseLiteException("Map function is missing.");
                    EmitDelegate emitBlock = (key, value) =>
                    {
                        // TODO: Do we need to do any null checks on key or value?
                        try
                        {
                            var keyJson = Manager.GetObjectMapper().WriteValueAsString(key);
                            var valueJson = value == null ? null : Manager.GetObjectMapper().WriteValueAsString(value);
                            Log.V(Database.Tag, String.Format(" emit({0}, {1})", keyJson, valueJson));
                            var insertValues = new ContentValues();
                            insertValues.Put("view_id", enclosingView.Id);
                            insertValues["sequence"] = thisSequence;
                            insertValues["key"] = keyJson;
                            insertValues["value"] = valueJson;
                            enclosingView.Database.StorageEngine.Insert("maps", null, insertValues);
                        }
                        catch (Exception e)
                        {
                            Log.E(Database.Tag, "Error emitting", e);
                        }
                    };
                    map(properties, emitBlock);
                }
            }
            cursor.MoveToNext();
        }
        // Finally, record the last revision sequence number that was indexed:
        ContentValues updateValues = new ContentValues();
        updateValues["lastSequence"] = dbMaxSequence;
        var whereArgs_1 = new string[] { Sharpen.Extensions.ToString(Id) };
        Database.StorageEngine.Update("views", updateValues, "view_id=@", whereArgs_1);
        // FIXME actually count number added :)
        Log.V(Database.Tag, "...Finished re-indexing view " + Name + " up to sequence "
            + System.Convert.ToString(dbMaxSequence) + " (deleted " + deleted + " added " + "?" + ")");
        result.SetCode(StatusCode.Ok);
    }
    catch (SQLException e)
    {
        throw new CouchbaseLiteException(e, new Status(StatusCode.DbError));
    }
    finally
    {
        if (cursor != null)
        {
            cursor.Close();
        }
        if (!result.IsSuccessful())
        {
            Log.W(Database.Tag, "Failed to rebuild view " + Name + ": " + result.GetCode());
        }
        if (Database != null)
        {
            Database.EndTransaction(result.IsSuccessful());
        }
    }
}
/// <summary>
/// Re-indexes this view: clears obsolete map rows, runs the user-defined
/// map block over every winning revision added since the last indexing, and
/// records the new checkpoint sequence in the 'views' table.
/// </summary>
/// <exception cref="CouchbaseLiteException">On missing view id or SQL errors.</exception>
public void UpdateIndex()
{
    Log.V(Database.Tag, "Re-indexing view " + name + " ...");
    System.Diagnostics.Debug.Assert((mapBlock != null));
    if (GetViewId() < 0)
    {
        string msg = "getViewId() < 0";
        throw new CouchbaseLiteException(msg, new Status(Status.NotFound));
    }
    database.BeginTransaction();
    Status result = new Status(Status.InternalServerError);
    Cursor cursor = null;
    try
    {
        long lastSequence = GetLastSequenceIndexed();
        long dbMaxSequence = database.GetLastSequenceNumber();
        if (lastSequence == dbMaxSequence)
        {
            // nothing to do (eg, kCBLStatusNotModified)
            // FIX: the original used Java-style %d placeholders, which .NET
            // string.Format leaves verbatim (dropping the values); use {0}/{1}.
            string msg = string.Format("lastSequence ({0}) == dbMaxSequence ({1}), nothing to do",
                lastSequence, dbMaxSequence);
            Log.D(Database.Tag, msg);
            result.SetCode(Status.Ok);
            return;
        }
        // First remove obsolete emitted results from the 'maps' table:
        long sequence = lastSequence;
        if (lastSequence < 0)
        {
            // FIX: same Java %s placeholder issue as above.
            string msg = string.Format("lastSequence < 0 ({0})", lastSequence);
            throw new CouchbaseLiteException(msg, new Status(Status.InternalServerError));
        }
        if (lastSequence == 0)
        {
            // If the lastSequence has been reset to 0, make sure to remove any leftover rows:
            string[] whereArgs = new string[] { Sharpen.Extensions.ToString(GetViewId()) };
            database.GetDatabase().Delete("maps", "view_id=?", whereArgs);
        }
        else
        {
            // Delete all obsolete map results (ones from since-replaced revisions):
            string[] args = new string[] { Sharpen.Extensions.ToString(GetViewId()),
                System.Convert.ToString(lastSequence), System.Convert.ToString(lastSequence) };
            database.GetDatabase().ExecSQL("DELETE FROM maps WHERE view_id=? AND sequence IN ("
                + "SELECT parent FROM revs WHERE sequence>? "
                + "AND parent>0 AND parent<=?)", args);
        }
        int deleted = 0;
        cursor = database.GetDatabase().RawQuery("SELECT changes()", null);
        cursor.MoveToNext();
        deleted = cursor.GetInt(0);
        cursor.Close();
        // This is the emit() block, which gets called from within the
        // user-defined map() block that's called down below.
        AbstractTouchMapEmitBlock emitBlock = new _AbstractTouchMapEmitBlock_446(this);
        // Now scan every revision added since the last time the view was indexed:
        string[] selectArgs = new string[] { System.Convert.ToString(lastSequence) };
        cursor = database.GetDatabase().RawQuery("SELECT revs.doc_id, sequence, docid, revid, json FROM revs, docs "
            + "WHERE sequence>? AND current!=0 AND deleted=0 "
            + "AND revs.doc_id = docs.doc_id "
            + "ORDER BY revs.doc_id, revid DESC", selectArgs);
        cursor.MoveToNext();
        long lastDocID = 0;
        while (!cursor.IsAfterLast())
        {
            long docID = cursor.GetLong(0);
            if (docID != lastDocID)
            {
                // Only look at the first-iterated revision of any document,
                // because this is the one with the highest revid, hence the
                // "winning" revision of a conflict.
                lastDocID = docID;
                // Reconstitute the document as a dictionary:
                sequence = cursor.GetLong(1);
                string docId = cursor.GetString(2);
                if (docId.StartsWith("_design/"))
                {
                    // design docs don't get indexed!
                    cursor.MoveToNext();
                    continue;
                }
                string revId = cursor.GetString(3);
                byte[] json = cursor.GetBlob(4);
                IDictionary<string, object> properties = database.DocumentPropertiesFromJSON(json,
                    docId, revId, false, sequence, EnumSet.NoneOf<Database.TDContentOptions>());
                if (properties != null)
                {
                    // Call the user-defined map() to emit new key/value pairs from this revision:
                    Log.V(Database.Tag, " call map for sequence=" + System.Convert.ToString(sequence));
                    emitBlock.SetSequence(sequence);
                    mapBlock.Map(properties, emitBlock);
                }
            }
            cursor.MoveToNext();
        }
        // Finally, record the last revision sequence number that was indexed:
        ContentValues updateValues = new ContentValues();
        updateValues.Put("lastSequence", dbMaxSequence);
        string[] whereArgs_1 = new string[] { Sharpen.Extensions.ToString(GetViewId()) };
        database.GetDatabase().Update("views", updateValues, "view_id=?", whereArgs_1);
        // FIXME actually count number added :)
        Log.V(Database.Tag, "...Finished re-indexing view " + name + " up to sequence "
            + System.Convert.ToString(dbMaxSequence) + " (deleted " + deleted + " added " + "?" + ")");
        result.SetCode(Status.Ok);
    }
    catch (SQLException e)
    {
        throw new CouchbaseLiteException(e, new Status(Status.DbError));
    }
    finally
    {
        if (cursor != null)
        {
            cursor.Close();
        }
        if (!result.IsSuccessful())
        {
            Log.W(Database.Tag, "Failed to rebuild view " + name + ": " + result.GetCode());
        }
        if (database != null)
        {
            database.EndTransaction(result.IsSuccessful());
        }
    }
}