/// <summary>
/// Runs a query against this view's index with the given options and materializes
/// the matching rows.
/// </summary>
/// <param name="options">The query options to apply; a default instance is used when null.</param>
/// <returns>The rows produced by the (possibly reduced or grouped) query.</returns>
/// <exception cref="CouchbaseLiteException">
/// BadRequest when reduce is requested but the view has no reduce block;
/// DbError when the underlying SQL query fails.
/// </exception>
public IList<QueryRow> QueryWithOptions(QueryOptions options)
{
    if (options == null) {
        options = new QueryOptions();
    }

    Cursor cursor = null;
    IList<QueryRow> rows = new AList<QueryRow>();
    try {
        cursor = ResultSetWithOptions(options);
        int groupLevel = options.GetGroupLevel();
        bool group = options.IsGroup() || (groupLevel > 0);
        bool reduce = options.IsReduce() || group;
        if (reduce && (reduceBlock == null) && !group) {
            // BUG FIX: the message used the Java-style "%s" placeholder, which
            // .NET formatting leaves verbatim; "{0}" is the correct placeholder.
            Log.W(Log.TagView, "Cannot use reduce option in view {0} which has no reduce block defined", name);
            throw new CouchbaseLiteException(new Status(Status.BadRequest));
        }

        if (reduce || group) {
            // Reduced or grouped query:
            rows = ReducedQuery(cursor, group, groupLevel);
        } else {
            // regular query
            cursor.MoveToNext();
            while (!cursor.IsAfterLast()) {
                JsonDocument keyDoc = new JsonDocument(cursor.GetBlob(0));
                JsonDocument valueDoc = new JsonDocument(cursor.GetBlob(1));
                string docId = cursor.GetString(2);
                int sequence = Sharpen.Extensions.ValueOf(cursor.GetString(3));
                IDictionary<string, object> docContents = null;
                if (options.IsIncludeDocs()) {
                    object valueObject = valueDoc.JsonObject();
                    // http://wiki.apache.org/couchdb/Introduction_to_CouchDB_views#Linked_documents
                    if (valueObject is IDictionary && ((IDictionary)valueObject).ContainsKey("_id")) {
                        // The emitted value names another document; include that one instead.
                        string linkedDocId = (string)((IDictionary)valueObject).Get("_id");
                        RevisionInternal linkedDoc = database.GetDocumentWithIDAndRev(linkedDocId, null,
                            EnumSet.NoneOf<Database.TDContentOptions>());
                        docContents = linkedDoc.GetProperties();
                    } else {
                        docContents = database.DocumentPropertiesFromJSON(cursor.GetBlob(5), docId,
                            cursor.GetString(4), false, cursor.GetLong(3), options.GetContentOptions());
                    }
                }

                QueryRow row = new QueryRow(docId, sequence, keyDoc.JsonObject(), valueDoc.JsonObject(), docContents);
                row.SetDatabase(database);
                rows.AddItem(row);
                cursor.MoveToNext();
            }
        }
    } catch (SQLException e) {
        // BUG FIX: "Error querying view: %s" produced the literal text "%s" because
        // string.Format uses "{0}"-style placeholders, not printf-style "%s".
        string errMsg = string.Format("Error querying view: {0}", this);
        Log.E(Log.TagView, errMsg, e);
        throw new CouchbaseLiteException(errMsg, e, new Status(Status.DbError));
    } finally {
        if (cursor != null) {
            cursor.Close();
        }
    }

    return rows;
}
/// <summary>Constructor</summary>
/// <param name="document">The document this revision belongs to (passed to the base class)</param>
/// <param name="revision">The internal revision data this object wraps</param>
internal SavedRevision(Document document, RevisionInternal revision) : base(document)
{
    RevisionInternal = revision;
}
/// <summary>Constructor that also records the parent revision ID.</summary>
/// <param name="database">The database containing the revision (forwarded to the delegated constructor)</param>
/// <param name="revision">The internal revision data this object wraps</param>
/// <param name="parentRevId">The revision ID of this revision's parent; may be null for a root revision</param>
internal SavedRevision(Database database, RevisionInternal revision, string parentRevId)
    : this(database, revision)
{
    _parentRevID = parentRevId;
}
/// <summary>Fetches the contents of a revision from the remote db, including its parent revision ID.</summary>
/// <remarks>
/// Fetches the contents of a revision from the remote db, including its parent revision ID.
/// The contents are stored into rev.properties. The download is asynchronous: the work
/// happens in the callback passed to SendAsyncMultipartDownloaderRequest, which on success
/// queues the pulled revision for insertion and in all cases kicks PullRemoteRevisions()
/// to keep the pull pipeline moving.
/// </remarks>
/// <param name="rev">The revision whose contents should be fetched from the remote database</param>
private void PullRemoteRevision(RevisionInternal rev)
{
    // Construct a query. We want the revision history, and the bodies of attachments that have
    // been added since the latest revisions we have locally.
    // See: http://wiki.apache.org/couchdb/HTTP_Document_API#Getting_Attachments_With_a_Document
    var path = new StringBuilder($"/{Uri.EscapeUriString(rev.DocID)}?rev={Uri.EscapeUriString(rev.RevID.ToString())}&revs=true");
    var attachments = true;
    if (attachments) {
        // TODO: deferred attachments
        path.Append("&attachments=true");
    }

    // Include atts_since with a list of possible ancestor revisions of rev. If getting attachments,
    // this allows the server to skip the bodies of attachments that have not changed since the
    // local ancestor. The server can also trim the revision history it returns, to not extend past
    // the local ancestor (not implemented yet in SG but will be soon.)
    var knownRevs = default(IList<RevisionID>);
    ValueTypePtr<bool> haveBodies = false;
    try {
        knownRevs = LocalDatabase.Storage.GetPossibleAncestors(rev, MaxAttsSince, haveBodies)?.ToList();
    } catch (Exception e) {
        // Best-effort: a failure here only costs us the atts_since optimization.
        Log.To.Sync.W(TAG, "Error getting possible ancestors (probably database closed)", e);
    }

    if (knownRevs != null) {
        // Which query parameter we use depends on whether we have the ancestors' bodies locally.
        path.Append(haveBodies ? "&atts_since=" : "&revs_from=");
        path.Append(JoinQuotedEscaped(knownRevs.Select(x => x.ToString()).ToList()));
    } else {
        // If we don't have any revisions at all, at least tell the server how long a history we
        // can keep track of:
        var maxRevTreeDepth = LocalDatabase.GetMaxRevTreeDepth();
        if (rev.Generation > maxRevTreeDepth) {
            path.AppendFormat("&revs_limit={0}", maxRevTreeDepth);
        }
    }

    var pathInside = path.ToString();
    Log.To.SyncPerf.I(TAG, "{0} getting {1}", this, rev);
    Log.To.Sync.V(TAG, "{0} GET {1}", this, new SecureLogString(pathInside, LogMessageSensitivity.PotentiallyInsecure));
    _remoteSession.SendAsyncMultipartDownloaderRequest(HttpMethod.Get, pathInside, null, LocalDatabase, (result, e) =>
    {
        // OK, now we've got the response revision:
        Log.To.SyncPerf.I(TAG, "{0} got {1}", this, rev);
        if (e != null) {
            Log.To.Sync.I(TAG, String.Format("{0} error pulling remote revision", this), e);
            LastError = e;
            RevisionFailed();
            SafeIncrementCompletedChangesCount();
            if (IsDocumentError(e)) {
                // Make sure this document is skipped because it is not available
                // even though the server is functioning
                _pendingSequences.RemoveSequence(rev.Sequence);
                LastSequence = _pendingSequences.GetCheckpointedValue();
            }
        } else {
            var properties = result.AsDictionary<string, object>();
            var gotRev = new PulledRevision(properties);
            // Carry over the local pending-sequence so checkpointing stays correct.
            gotRev.Sequence = rev.Sequence;
            if (_downloadsToInsert != null) {
                if (!_downloadsToInsert.QueueObject(gotRev)) {
                    Log.To.Sync.W(TAG, "{0} failed to queue {1} for download because it is already queued, marking completed...", this, rev);
                    SafeIncrementCompletedChangesCount();
                }
            } else {
                Log.To.Sync.E(TAG, "downloadsToInsert is null");
            }
        }

        // Note that we've finished this task; then start another one if there
        // are still revisions waiting to be pulled:
        PullRemoteRevisions();
    });
}
/// <summary>
/// Imports every revision of a single document from the old (pre-upgrade) SQLite
/// database into the target database, rebuilding each leaf revision's history
/// from the stored parent-sequence pointers.
/// </summary>
/// <param name="docID">The document's string ID</param>
/// <param name="docNumericID">The document's numeric row ID in the old docs table</param>
private void ImportDoc(string docID, long docNumericID)
{
    // CREATE TABLE revs (
    //  sequence INTEGER PRIMARY KEY AUTOINCREMENT,
    //  doc_id INTEGER NOT NULL REFERENCES docs(doc_id) ON DELETE CASCADE,
    //  revid TEXT NOT NULL COLLATE REVID,
    //  parent INTEGER REFERENCES revs(sequence) ON DELETE SET NULL,
    //  current BOOLEAN,
    //  deleted BOOLEAN DEFAULT 0,
    //  json BLOB,
    //  no_attachments BOOLEAN,
    //  UNIQUE (doc_id, revid) );
    sqlite3_stmt revQuery = null;
    _inner.PrepareSQL(ref revQuery, "SELECT sequence, revid, parent, current, deleted, json, no_attachments" + " FROM revs WHERE doc_id=? ORDER BY sequence");
    raw.sqlite3_bind_int64(revQuery, 1, docNumericID);
    // Maps sequence -> [revID, parentSeq] for non-leaf rows; because rows arrive
    // ordered by sequence, every parent is seen before any child that references it.
    var tree = new Dictionary<long, IList<object>>();
    int err;
    while (raw.SQLITE_ROW == (err = raw.sqlite3_step(revQuery))) {
        long sequence = raw.sqlite3_column_int64(revQuery, 0);
        string revID = raw.sqlite3_column_text(revQuery, 1);
        long parentSeq = raw.sqlite3_column_int64(revQuery, 2);
        bool current = raw.sqlite3_column_int(revQuery, 3) != 0;
        bool noAtts = raw.sqlite3_column_int(revQuery, 6) != 0;
        if (current) {
            // Add a leaf revision:
            bool deleted = raw.sqlite3_column_int(revQuery, 4) != 0;
            IEnumerable<byte> json = raw.sqlite3_column_blob(revQuery, 5);
            if (json == null) {
                // A leaf with no stored body gets an empty JSON object.
                json = Encoding.UTF8.GetBytes("{}");
            }
            var nuJson = json.ToList();
            if (!noAtts) {
                // Rewrite the body's attachment metadata (adds "follows" markers).
                try {
                    UpdateAttachmentFollows(nuJson);
                } catch (CouchbaseLiteException) {
                    Log.To.Upgrade.E(TAG, "Failed to process attachments, rethrowing...");
                    throw;
                } catch (Exception e) {
                    throw Misc.CreateExceptionAndLog(Log.To.Upgrade, e, TAG, "Error processing attachments");
                }
            }
            json = nuJson;
            RevisionInternal rev = new RevisionInternal(docID, revID, deleted);
            rev.SetJson(json);
            // Walk the parent pointers back to the root to reconstruct this
            // leaf's revision history (leaf first).
            var history = new List<string>();
            history.Add(revID);
            while (parentSeq > 0) {
                var ancestor = tree.Get(parentSeq);
                Debug.Assert(ancestor != null, String.Format("Couldn't find parent sequence of {0} (doc {1})", parentSeq, docID));
                history.Add((string)ancestor[0]);
                parentSeq = (long)ancestor[1];
            }
            Log.To.Upgrade.V(TAG, "Upgrading doc {0} history {1}", rev, Manager.GetObjectMapper().WriteValueAsString(history));
            try {
                _db.ForceInsert(rev, history, null);
            } catch (CouchbaseLiteException) {
                Log.To.Upgrade.E(TAG, "Failed to insert revision {0} into target database, rethrowing...", rev);
                // Finalize before rethrowing so the statement isn't leaked.
                raw.sqlite3_finalize(revQuery);
                throw;
            } catch (Exception e) {
                raw.sqlite3_finalize(revQuery);
                throw Misc.CreateExceptionAndLog(Log.To.Upgrade, e, TAG, "Error inserting revision {0} into target database", rev);
            }
            NumRevs++;
        } else {
            // Non-leaf: remember it so descendants can look up their history.
            tree[sequence] = new List<object> { revID, parentSeq };
        }
    }
    raw.sqlite3_finalize(revQuery);
    ++NumDocs;
    if (err != raw.SQLITE_OK) {
        // NOTE(review): the loop normally exits with SQLITE_DONE, which is not
        // SQLITE_OK — presumably SqliteErrToStatus maps DONE to a non-error
        // status; confirm, otherwise this check should compare against SQLITE_DONE.
        var s = SqliteErrToStatus(err);
        if (s.IsError) {
            throw Misc.CreateExceptionAndLog(Log.To.Upgrade, s.Code, TAG, "SQLite error during upgrade ({0})", err);
        }
    }
}
/// <summary>
/// Uploads a revision whose attachments carry "follows" markers as a single
/// multipart/related PUT (JSON body first, then each attachment body).
/// </summary>
/// <param name="revision">The revision to upload</param>
/// <returns>
/// true if a multipart request was dispatched; false if there was nothing to
/// send as multipart (caller is expected to fall back to a plain JSON upload).
/// </returns>
private bool UploadMultipartRevision(RevisionInternal revision)
{
    MultipartContent multiPart = null;
    var revProps = revision.GetProperties();
    // NOTE(review): if revProps has no "_attachments" entry this dereferences
    // null — presumably callers only invoke this for revisions that have
    // attachments; verify against the call sites.
    var attachments = revProps.Get("_attachments").AsDictionary<string, object>();
    foreach (var attachmentKey in attachments.Keys) {
        var attachment = attachments.Get(attachmentKey).AsDictionary<string, object>();
        if (attachment.ContainsKey("follows")) {
            if (multiPart == null) {
                // First "follows" attachment: create the container and add the
                // revision JSON as the initial part.
                multiPart = new MultipartContent("related");
                try {
                    var json = Manager.GetObjectMapper().WriteValueAsString(revProps);
                    var utf8charset = Encoding.UTF8;
                    //multiPart.Add(new StringContent(json, utf8charset, "application/json"), "param1");
                    var jsonContent = new StringContent(json, utf8charset, "application/json");
                    //jsonContent.Headers.ContentDisposition = new ContentDispositionHeaderValue("attachment");
                    multiPart.Add(jsonContent);
                } catch (IOException e) {
                    throw new ArgumentException("Not able to serialize revision properties into a multipart request content.", e);
                }
            }
            var blobStore = LocalDatabase.Attachments;
            var base64Digest = (string)attachment.Get("digest");
            var blobKey = new BlobKey(base64Digest);
            var inputStream = blobStore.BlobStreamForKey(blobKey);
            if (inputStream == null) {
                Log.W(Tag, "Unable to find blob file for blobKey: " + blobKey + " - Skipping upload of multipart revision.");
                // NOTE(review): nulling multiPart abandons parts added so far
                // (their streams are never disposed), and a later "follows"
                // attachment re-creates the container without this one —
                // looks suspect; confirm whether an early return was intended.
                multiPart = null;
            } else {
                string contentType = null;
                if (attachment.ContainsKey("content_type")) {
                    contentType = (string)attachment.Get("content_type");
                } else {
                    if (attachment.ContainsKey("content-type")) {
                        var message = string.Format("Found attachment that uses content-type" + " field name instead of content_type (see couchbase-lite-android" + " issue #80): " + attachment);
                        Log.W(Tag, message);
                    }
                }
                var content = new StreamContent(inputStream);
                content.Headers.ContentDisposition = new ContentDispositionHeaderValue("attachment") { FileName = Path.GetFileName(blobStore.PathForKey(blobKey)) };
                // NOTE(review): contentType may still be null here and
                // MediaTypeHeaderValue(null) throws — confirm attachments
                // always carry content_type at this point.
                content.Headers.ContentType = new MediaTypeHeaderValue(contentType);
                multiPart.Add(content);
            }
        }
    }
    if (multiPart == null) {
        // No "follows" attachments (or a blob was missing): nothing to send.
        return(false);
    }
    var path = string.Format("/{0}?new_edits=false", revision.GetDocId());
    // TODO: need to throttle these requests
    Log.D(Tag, "Uploading multipart request. Revision: " + revision);
    Log.D(Tag, "uploadMultipartRevision() calling asyncTaskStarted()");
    ChangesCount += 1;
    AsyncTaskStarted();
    SendAsyncMultipartRequest(HttpMethod.Put, path, multiPart, (result, e) =>
    {
        try {
            if (e != null) {
                var httpError = e as HttpResponseException;
                if (httpError != null) {
                    if (httpError.StatusCode == System.Net.HttpStatusCode.UnsupportedMediaType) {
                        // Server can't accept multipart; remember that and retry as JSON.
                        dontSendMultipart = true;
                        UploadJsonRevision(revision);
                    }
                } else {
                    Log.E(Tag, "Exception uploading multipart request", e);
                    SetLastError(e);
                    RevisionFailed();
                }
            } else {
                Log.D(Tag, "Uploaded multipart request. Result: " + result);
                RemovePending(revision);
            }
        } finally {
            Log.D(Tag, "uploadMultipartRevision() calling asyncTaskFinished()");
            // TODO: calling addToCompleteChangesCount(1)
            AsyncTaskFinished(1);
        }
    });
    return(true);
}
/// <summary>
/// Attempt to update a document based on the information in the HTTP request
/// </summary>
/// <returns>The resulting status of the operation</returns>
/// <param name="context">The request context</param>
/// <param name="db">The database in which the document exists</param>
/// <param name="docId">The ID of the document being updated</param>
/// <param name="body">The new document body</param>
/// <param name="deleting">Whether or not the document is being deleted</param>
/// <param name="allowConflict">Whether or not to allow a conflict to be inserted</param>
/// <param name="outRev">The resulting revision of the document</param>
public static StatusCode UpdateDocument(ICouchbaseListenerContext context, Database db, string docId, Body body, bool deleting, bool allowConflict, out RevisionInternal outRev)
{
    outRev = null;
    if (body != null && !body.IsValidJSON()) {
        return StatusCode.BadJson;
    }

    string prevRevId;
    if (!deleting) {
        var properties = body.GetProperties();
        deleting = properties.GetCast<bool>("_deleted");
        if (docId == null) {
            // POST's doc ID may come from the _id field of the JSON body.
            docId = properties.CblID();
            if (docId == null && deleting) {
                return StatusCode.BadId;
            }
        }
        // PUT's revision ID comes from the JSON body.
        prevRevId = properties.GetCast<string>("_rev");
    } else {
        // DELETE's revision ID comes from the ?rev= query param
        prevRevId = context.GetQueryParam("rev");
    }

    // A backup source of revision ID is an If-Match header:
    if (prevRevId == null) {
        prevRevId = context.IfMatch();
    }

    if (docId == null && deleting) {
        return StatusCode.BadId;
    }

    RevisionInternal rev = new RevisionInternal(docId, null, deleting);
    rev.SetBody(body);

    // Check for doc expiration
    var expirationTime = default(DateTime?);
    var tmp = default(object);
    var props = rev.GetProperties();
    var hasValue = false;
    if (props != null && props.TryGetValue("_exp", out tmp)) {
        hasValue = true;
        if (tmp != null) {
            if (tmp is DateTime) {
                expirationTime = (DateTime)tmp;
            } else if (tmp is DateTimeOffset) {
                // BUG FIX: the original cast the boxed DateTimeOffset directly to
                // DateTime ("(DateTime)tmp"), which throws InvalidCastException at
                // runtime (unboxing cannot convert between the two types). Convert
                // explicitly; UTC matches the epoch-based fallback below.
                expirationTime = ((DateTimeOffset)tmp).UtcDateTime;
            } else {
                try {
                    expirationTime = Convert.ToDateTime(tmp);
                } catch (Exception) {
                    try {
                        // Fall back to interpreting the value as seconds since the Unix epoch.
                        var num = Convert.ToInt64(tmp);
                        expirationTime = Misc.OffsetFromEpoch(TimeSpan.FromSeconds(num));
                    } catch (Exception) {
                        Log.To.Router.E(TAG, "Invalid value for _exp: {0}", tmp);
                        return StatusCode.BadRequest;
                    }
                }
            }
        }
        // _exp is router metadata, not document content; strip it before inserting.
        props.Remove("_exp");
        rev.SetProperties(props);
    }

    var castContext = context as ICouchbaseListenerContext2;
    var source = castContext != null && !castContext.IsLoopbackRequest ? castContext.Sender : null;
    StatusCode status = deleting ? StatusCode.Ok : StatusCode.Created;
    try {
        // Ordinal comparison: "_local" is a protocol prefix, not linguistic text.
        if (docId != null && docId.StartsWith("_local", StringComparison.Ordinal)) {
            // Local (non-replicated) documents cannot carry an expiration.
            if (expirationTime.HasValue) {
                return StatusCode.BadRequest;
            }

            Log.To.Router.I(TAG, "Attempting to insert local {0} on top of {1} from PUT request", rev, prevRevId != null ? prevRevId.ToString() : "<root>");
            outRev = db.Storage.PutLocalRevision(rev, prevRevId.AsRevID(), true); //TODO: Doesn't match iOS
        } else {
            Log.To.Router.I(TAG, "Attempting to insert {0} on top of {1} from PUT request", rev, prevRevId != null ? prevRevId.ToString() : "<root>");
            outRev = db.PutRevision(rev, prevRevId.AsRevID(), allowConflict, source);
            if (hasValue) {
                db.Storage?.SetDocumentExpiration(rev.DocID, expirationTime);
            }
        }
    } catch (CouchbaseLiteException e) {
        status = e.Code;
    }

    return status;
}
/// <summary>
/// Exercises document validation: registers a validator that accepts deletions
/// and any document with a "towel" property, then verifies accept/reject
/// behavior across POST, PUT, update, and DELETE operations.
/// </summary>
public void TestValidations()
{
    // Validator: accept deletions and any document carrying a "towel" property;
    // reject everything else and record that validation ran.
    ValidateDelegate validator = (newRevision, context) =>
    {
        Assert.IsNotNull(newRevision);
        Assert.IsNotNull(context);
        Assert.IsTrue(newRevision.Properties != null || newRevision.IsDeletion);
        validationCalled = true;
        bool hoopy = newRevision.IsDeletion || (newRevision.Properties.Get("towel") != null);
        Log.V(ValidationsTest.Tag, string.Format("--- Validating {0} --> {1}", newRevision.Properties, hoopy));
        if (!hoopy) {
            context.Reject("Where's your towel?");
        }
        return(hoopy);
    };
    database.SetValidation("hoopy", validator);
    // POST a valid new document:
    IDictionary<string, object> props = new Dictionary<string, object>();
    props["name"] = "Zaphod Beeblebrox";
    props["towel"] = "velvet";
    RevisionInternal rev = new RevisionInternal(props, database);
    Status status = new Status();
    validationCalled = false;
    rev = database.PutRevision(rev, null, false, status);
    Assert.IsTrue(validationCalled);
    Assert.AreEqual(StatusCode.Created, status.GetCode());
    // PUT a valid update:
    props["head_count"] = 3;
    rev.SetProperties(props);
    validationCalled = false;
    rev = database.PutRevision(rev, rev.GetRevId(), false, status);
    Assert.IsTrue(validationCalled);
    Assert.AreEqual(StatusCode.Created, status.GetCode());
    // PUT an invalid update (removing "towel" must be rejected with Forbidden):
    Sharpen.Collections.Remove(props, "towel");
    rev.SetProperties(props);
    validationCalled = false;
    bool gotExpectedError = false;
    try {
        rev = database.PutRevision(rev, rev.GetRevId(), false, status);
    } catch (CouchbaseLiteException e) {
        gotExpectedError = (e.GetCBLStatus().GetCode() == StatusCode.Forbidden);
    }
    Assert.IsTrue(validationCalled);
    Assert.IsTrue(gotExpectedError);
    // POST an invalid new document:
    props = new Dictionary<string, object>();
    props["name"] = "Vogon";
    props["poetry"] = true;
    rev = new RevisionInternal(props, database);
    validationCalled = false;
    gotExpectedError = false;
    try {
        rev = database.PutRevision(rev, null, false, status);
    } catch (CouchbaseLiteException e) {
        gotExpectedError = (e.GetCBLStatus().GetCode() == StatusCode.Forbidden);
    }
    Assert.IsTrue(validationCalled);
    Assert.IsTrue(gotExpectedError);
    // PUT a valid new document with an ID:
    props = new Dictionary<string, object>();
    props["_id"] = "ford";
    props["name"] = "Ford Prefect";
    props["towel"] = "terrycloth";
    rev = new RevisionInternal(props, database);
    validationCalled = false;
    rev = database.PutRevision(rev, null, false, status);
    Assert.IsTrue(validationCalled);
    Assert.AreEqual("ford", rev.GetDocId());
    // DELETE a document (deletions pass validation even without "towel"):
    rev = new RevisionInternal(rev.GetDocId(), rev.GetRevId(), true, database);
    Assert.IsTrue(rev.IsDeleted());
    validationCalled = false;
    rev = database.PutRevision(rev, rev.GetRevId(), false, status);
    Assert.IsTrue(validationCalled);
    // PUT an invalid new document:
    props = new Dictionary<string, object>();
    props["_id"] = "petunias";
    props["name"] = "Pot of Petunias";
    rev = new RevisionInternal(props, database);
    validationCalled = false;
    gotExpectedError = false;
    try {
        rev = database.PutRevision(rev, null, false, status);
    } catch (CouchbaseLiteException e) {
        gotExpectedError = (e.GetCBLStatus().GetCode() == StatusCode.Forbidden);
    }
    Assert.IsTrue(validationCalled);
    Assert.IsTrue(gotExpectedError);
}
/// <summary>
/// Sharpen-ported variant of the validations test: registers a validator and
/// verifies accept/reject behavior across POST, PUT, update, and DELETE.
/// </summary>
/// <exception cref="Couchbase.Lite.CouchbaseLiteException"></exception>
public virtual void TestValidations()
{
    // Validator is the Sharpen-generated anonymous class _Validator_19;
    // presumably it accepts deletions and documents with a "towel" property —
    // see _Validator_19 for the actual rules.
    Validator validator = new _Validator_19(this);
    database.SetValidation("hoopy", validator);
    // POST a valid new document:
    IDictionary<string, object> props = new Dictionary<string, object>();
    props.Put("name", "Zaphod Beeblebrox");
    props.Put("towel", "velvet");
    RevisionInternal rev = new RevisionInternal(props, database);
    Status status = new Status();
    validationCalled = false;
    rev = database.PutRevision(rev, null, false, status);
    NUnit.Framework.Assert.IsTrue(validationCalled);
    NUnit.Framework.Assert.AreEqual(Status.Created, status.GetCode());
    // PUT a valid update:
    props.Put("head_count", 3);
    rev.SetProperties(props);
    validationCalled = false;
    rev = database.PutRevision(rev, rev.GetRevId(), false, status);
    NUnit.Framework.Assert.IsTrue(validationCalled);
    NUnit.Framework.Assert.AreEqual(Status.Created, status.GetCode());
    // PUT an invalid update (removing "towel" must be rejected with Forbidden):
    Sharpen.Collections.Remove(props, "towel");
    rev.SetProperties(props);
    validationCalled = false;
    bool gotExpectedError = false;
    try {
        rev = database.PutRevision(rev, rev.GetRevId(), false, status);
    } catch (CouchbaseLiteException e) {
        gotExpectedError = (e.GetCBLStatus().GetCode() == Status.Forbidden);
    }
    NUnit.Framework.Assert.IsTrue(validationCalled);
    NUnit.Framework.Assert.IsTrue(gotExpectedError);
    // POST an invalid new document:
    props = new Dictionary<string, object>();
    props.Put("name", "Vogon");
    props.Put("poetry", true);
    rev = new RevisionInternal(props, database);
    validationCalled = false;
    gotExpectedError = false;
    try {
        rev = database.PutRevision(rev, null, false, status);
    } catch (CouchbaseLiteException e) {
        gotExpectedError = (e.GetCBLStatus().GetCode() == Status.Forbidden);
    }
    NUnit.Framework.Assert.IsTrue(validationCalled);
    NUnit.Framework.Assert.IsTrue(gotExpectedError);
    // PUT a valid new document with an ID:
    props = new Dictionary<string, object>();
    props.Put("_id", "ford");
    props.Put("name", "Ford Prefect");
    props.Put("towel", "terrycloth");
    rev = new RevisionInternal(props, database);
    validationCalled = false;
    rev = database.PutRevision(rev, null, false, status);
    NUnit.Framework.Assert.IsTrue(validationCalled);
    NUnit.Framework.Assert.AreEqual("ford", rev.GetDocId());
    // DELETE a document (deletions pass validation even without "towel"):
    rev = new RevisionInternal(rev.GetDocId(), rev.GetRevId(), true, database);
    NUnit.Framework.Assert.IsTrue(rev.IsDeleted());
    validationCalled = false;
    rev = database.PutRevision(rev, rev.GetRevId(), false, status);
    NUnit.Framework.Assert.IsTrue(validationCalled);
    // PUT an invalid new document:
    props = new Dictionary<string, object>();
    props.Put("_id", "petunias");
    props.Put("name", "Pot of Petunias");
    rev = new RevisionInternal(props, database);
    validationCalled = false;
    gotExpectedError = false;
    try {
        rev = database.PutRevision(rev, null, false, status);
    } catch (CouchbaseLiteException e) {
        gotExpectedError = (e.GetCBLStatus().GetCode() == Status.Forbidden);
    }
    NUnit.Framework.Assert.IsTrue(validationCalled);
    NUnit.Framework.Assert.IsTrue(gotExpectedError);
}
/// <summary>
/// Creates documents locally, pushes them to the remote endpoint with a
/// one-shot push replication, and then fetches doc1 over plain HTTP to
/// confirm it arrived on the server.
/// </summary>
public virtual void TestPusher()
{
    var replicationDoneSignal = new CountDownLatch(1);
    var remote = GetReplicationURL();
    // Timestamp suffix keeps doc IDs unique across runs against the same server.
    var docIdTimestamp = Convert.ToString(Runtime.CurrentTimeMillis());
    // Create some documents:
    var documentProperties = new Dictionary<string, object>();
    var doc1Id = string.Format("doc1-{0}", docIdTimestamp);
    documentProperties["_id"] = doc1Id;
    documentProperties["foo"] = 1;
    documentProperties["bar"] = false;
    var body = new Body(documentProperties);
    var rev1 = new RevisionInternal(body, database);
    var status = new Status();
    rev1 = database.PutRevision(rev1, null, false, status);
    Assert.AreEqual(StatusCode.Created, status.GetCode());
    // Second revision of doc1:
    documentProperties.Put("_rev", rev1.GetRevId());
    documentProperties["UPDATED"] = true;
    var rev2 = database.PutRevision(new RevisionInternal(documentProperties, database), rev1.GetRevId(), false, status);
    Assert.AreEqual(StatusCode.Created, status.GetCode());
    // A second document:
    documentProperties = new Dictionary<string, object>();
    var doc2Id = string.Format("doc2-{0}", docIdTimestamp);
    documentProperties["_id"] = doc2Id;
    documentProperties["baz"] = 666;
    documentProperties["fnord"] = true;
    database.PutRevision(new RevisionInternal(documentProperties, database), null, false, status);
    Assert.AreEqual(StatusCode.Created, status.GetCode());
    var continuous = false;
    var repl = database.CreatePushReplication(remote);
    repl.Continuous = continuous;
    //repl.CreateTarget = false;
    // Check the replication's properties:
    Assert.AreEqual(database, repl.LocalDatabase);
    Assert.AreEqual(remote, repl.RemoteUrl);
    Assert.IsFalse(repl.IsPull);
    Assert.IsFalse(repl.Continuous);
    //Assert.IsTrue(repl.CreateTarget);
    Assert.IsNull(repl.Filter);
    Assert.IsNull(repl.FilterParams);
    // TODO: CAssertNil(r1.doc_ids);
    // TODO: CAssertNil(r1.headers);
    // Check that the replication hasn't started running:
    Assert.IsFalse(repl.IsRunning);
    Assert.AreEqual((int)repl.Status, (int)ReplicationStatus.Stopped);
    Assert.AreEqual(0, repl.CompletedChangesCount);
    Assert.AreEqual(0, repl.ChangesCount);
    Assert.IsNull(repl.LastError);
    RunReplication(repl);
    // make sure doc1 is there
    // TODO: make sure doc2 is there (refactoring needed)
    var replicationUrlTrailing = new Uri(string.Format("{0}/", remote));
    var pathToDoc = new Uri(replicationUrlTrailing, doc1Id);
    Log.D(Tag, "Send http request to " + pathToDoc);
    var httpRequestDoneSignal = new CountDownLatch(1);
    // Fetch doc1 from the server on a background task and verify the response.
    var getDocTask = Task.Factory.StartNew(() =>
    {
        var httpclient = new HttpClient();
        HttpResponseMessage response;
        string responseString = null;
        try {
            var responseTask = httpclient.GetAsync(pathToDoc.ToString());
            responseTask.Wait(TimeSpan.FromSeconds(10));
            response = responseTask.Result;
            var statusLine = response.StatusCode;
            NUnit.Framework.Assert.IsTrue(statusLine == HttpStatusCode.OK);
            if (statusLine == HttpStatusCode.OK) {
                var responseStringTask = response.Content.ReadAsStringAsync();
                responseStringTask.Wait(TimeSpan.FromSeconds(10));
                responseString = responseStringTask.Result;
                NUnit.Framework.Assert.IsTrue(responseString.Contains(doc1Id));
                Log.D(ReplicationTest.Tag, "result: " + responseString);
            } else {
                var statusReason = response.ReasonPhrase;
                response.Dispose();
                throw new IOException(statusReason);
            }
        } catch (ProtocolViolationException e) {
            NUnit.Framework.Assert.IsNull(e, "Got ClientProtocolException: " + e.Message);
        } catch (IOException e) {
            NUnit.Framework.Assert.IsNull(e, "Got IOException: " + e.Message);
        }
        httpRequestDoneSignal.CountDown();
    });
    //Closes the connection.
    Log.D(Tag, "Waiting for http request to finish");
    try {
        var result = httpRequestDoneSignal.Await(TimeSpan.FromSeconds(10));
        Assert.IsTrue(result, "Could not retrieve the new doc from the sync gateway.");
        Log.D(Tag, "http request finished");
    } catch (Exception e) {
        Sharpen.Runtime.PrintStackTrace(e);
    }
    Log.D(Tag, "testPusher() finished");
}
/// <summary>
/// Determines whether the given revision ID sorts after the current revision's
/// ID according to CouchbaseLite's revision-ID comparison rules.
/// </summary>
/// <param name="revId">The candidate revision ID to compare</param>
/// <returns>true if revId compares greater than the current revision's ID</returns>
private bool RevIdGreaterThanCurrent(string revId)
{
    var comparison = RevisionInternal.CBLCompareRevIDs(revId, currentRevision.Id);
    return comparison > 0;
}
/// <summary>
/// Creates a document, deletes it in a second revision, pushes to the remote
/// endpoint, and verifies the server reports the document as NotFound.
/// Currently disabled via the leading Assert.Fail (see TODO).
/// </summary>
public virtual void TestPusherDeletedDoc()
{
    Assert.Fail();
    // TODO.ZJG: Needs debugging, overflows stack.
    Uri remote = GetReplicationURL();
    // Timestamp suffix keeps doc IDs unique across runs against the same server.
    string docIdTimestamp = System.Convert.ToString(Runtime.CurrentTimeMillis());
    // Create some documents
    IDictionary<string, object> documentProperties = new Dictionary<string, object>();
    string doc1Id = string.Format("doc1-{0}", docIdTimestamp);
    documentProperties["_id"] = doc1Id;
    documentProperties["foo"] = 1;
    documentProperties["bar"] = false;
    Body body = new Body(documentProperties);
    RevisionInternal rev1 = new RevisionInternal(body, database);
    Status status = new Status();
    rev1 = database.PutRevision(rev1, null, false, status);
    NUnit.Framework.Assert.AreEqual(StatusCode.Created, status.GetCode());
    // Second revision updates the doc and marks it deleted:
    documentProperties["_rev"] = rev1.GetRevId();
    documentProperties["UPDATED"] = true;
    documentProperties["_deleted"] = true;
    RevisionInternal rev2 = database.PutRevision(new RevisionInternal(documentProperties, database), rev1.GetRevId(), false, status);
    NUnit.Framework.Assert.IsTrue((int)status.GetCode() >= 200 && (int)status.GetCode() < 300);
    var repl = database.CreatePushReplication(remote);
    ((Pusher)repl).CreateTarget = true;
    RunReplication(repl);
    // make sure doc1 is deleted
    Uri replicationUrlTrailing = new Uri(string.Format("{0}/", remote.ToString()));
    Uri pathToDoc = new Uri(replicationUrlTrailing, doc1Id);
    Log.D(Tag, "Send http request to " + pathToDoc);
    CountDownLatch httpRequestDoneSignal = new CountDownLatch(1);
    // Fetch the doc on a background task; a deleted, pushed doc must yield 404.
    Task.Factory.StartNew(() =>
    {
        var httpclient = new HttpClient();
        HttpResponseMessage response;
        try {
            var responseTask = httpclient.GetAsync(pathToDoc.ToString());
            responseTask.Wait();
            response = responseTask.Result;
            var statusLine = response.StatusCode;
            Log.D(ReplicationTest.Tag, "statusLine " + statusLine);
            Assert.AreEqual(HttpStatusCode.NotFound, statusLine.GetStatusCode());
        } catch (ProtocolViolationException e) {
            NUnit.Framework.Assert.IsNull(e, "Got ClientProtocolException: " + e.Message);
        } catch (IOException e) {
            NUnit.Framework.Assert.IsNull(e, "Got IOException: " + e.Message);
        } finally {
            httpRequestDoneSignal.CountDown();
        }
    });
    // BUG FIX: the original also called getDocTask.Start() here, but
    // Task.Factory.StartNew returns a task that is already running, and
    // calling Start() on a running task throws InvalidOperationException.
    // (Also removed the unused replicationDoneSignal and responseString locals.)
    Log.D(Tag, "Waiting for http request to finish");
    try {
        httpRequestDoneSignal.Await(TimeSpan.FromSeconds(10));
        Log.D(Tag, "http request finished");
    } catch (Exception e) {
        Sharpen.Runtime.PrintStackTrace(e);
    }
    Log.D(Tag, "testPusherDeletedDoc() finished");
}
/// <summary>
/// Exercises the attachment store across three revisions of one document:
/// creating an attachment, carrying it forward via a stub, replacing it, and
/// verifying that Compact() drops the no-longer-referenced blob.
/// </summary>
public void TestAttachments()
{
    const string testAttachmentName = "test_attachment";
    var attachments = database.Attachments;
    // Store starts empty:
    Assert.AreEqual(0, attachments.Count());
    Assert.AreEqual(0, attachments.AllKeys().Count());
    var attach1 = Encoding.UTF8.GetBytes("This is the body of attach1");
    var props = new Dictionary<string, object> {
        { "foo", 1 },
        { "bar", false },
        { "_attachments", CreateAttachmentsDict(attach1, testAttachmentName, "text/plain", false) }
    };
    Status status = new Status();
    RevisionInternal rev1 = database.PutRevision(new RevisionInternal(props), null, false, status);
    Assert.AreEqual(StatusCode.Created, status.GetCode());
    var att = database.GetAttachmentForRevision(rev1, testAttachmentName, status);
    Assert.IsNotNull(att, "Couldn't get attachment: Status {0}", status.GetCode());
    Assert.AreEqual(attach1, att.Content);
    Assert.AreEqual("text/plain", att.ContentType);
    Assert.AreEqual(AttachmentEncoding.None, att.Encoding);
    // Stub metadata expected when the revision is fetched without bodies:
    var itemDict = new Dictionary<string, object> {
        { "content_type", "text/plain" },
        { "digest", "sha1-gOHUOBmIMoDCrMuGyaLWzf1hQTE=" },
        { "length", 27 },
        { "stub", true },
        { "revpos", 1 }
    };
    var attachmentDict = new Dictionary<string, object> { { testAttachmentName, itemDict } };
    var gotRev1 = database.GetDocumentWithIDAndRev(rev1.GetDocId(), rev1.GetRevId(), DocumentContentOptions.None);
    AssertDictionariesAreEqual(attachmentDict, gotRev1.GetAttachments());
    // With IncludeAttachments the stub is replaced by inline base64 data:
    itemDict.Remove("stub");
    itemDict["data"] = Convert.ToBase64String(attach1);
    gotRev1 = database.GetDocumentWithIDAndRev(rev1.GetDocId(), rev1.GetRevId(), DocumentContentOptions.IncludeAttachments);
    var expandedRev = gotRev1.CopyWithDocID(rev1.GetDocId(), rev1.GetRevId());
    Assert.IsTrue(database.ExpandAttachments(expandedRev, 0, false, true, status));
    AssertDictionariesAreEqual(attachmentDict, expandedRev.GetAttachments());
    // Add a second revision that doesn't update the attachment:
    props = new Dictionary<string, object> {
        { "_id", rev1.GetDocId() },
        { "foo", 2 },
        { "bazz", false },
        { "_attachments", CreateAttachmentsStub(testAttachmentName) }
    };
    var rev2 = database.PutRevision(new RevisionInternal(props), rev1.GetRevId(), status);
    Assert.AreEqual(StatusCode.Created, status.GetCode());
    // Add a third revision of the same document:
    var attach2 = Encoding.UTF8.GetBytes("<html>And this is attach2</html>");
    props = new Dictionary<string, object> {
        { "_id", rev2.GetDocId() },
        { "foo", 2 },
        { "bazz", false },
        { "_attachments", CreateAttachmentsDict(attach2, testAttachmentName, "text/html", false) }
    };
    var rev3 = database.PutRevision(new RevisionInternal(props), rev2.GetRevId(), status);
    Assert.AreEqual(StatusCode.Created, status.GetCode());
    // Check the second revision's attachment
    att = database.GetAttachmentForRevision(rev2, testAttachmentName, status);
    Assert.IsNotNull(att, "Couldn't get attachment: Status {0}", status.GetCode());
    Assert.AreEqual(attach1, att.Content);
    Assert.AreEqual("text/plain", att.ContentType);
    Assert.AreEqual(AttachmentEncoding.None, att.Encoding);
    // Expanding with minRevPos=2 keeps the rev-1 attachment as a stub:
    expandedRev = rev2.CopyWithDocID(rev2.GetDocId(), rev2.GetRevId());
    Assert.IsTrue(database.ExpandAttachments(expandedRev, 2, false, true, status));
    AssertDictionariesAreEqual(new Dictionary<string, object> { { testAttachmentName, new Dictionary<string, object> { { "stub", true }, { "revpos", 1 } } } }, expandedRev.GetAttachments());
    // Check the 3rd revision's attachment:
    att = database.GetAttachmentForRevision(rev3, testAttachmentName, status);
    Assert.IsNotNull(att, "Couldn't get attachment: Status {0}", status.GetCode());
    Assert.AreEqual(attach2, att.Content);
    Assert.AreEqual("text/html", att.ContentType);
    Assert.AreEqual(AttachmentEncoding.None, att.Encoding);
    // The rev-3 attachment (revpos 3 >= 2) is expanded inline:
    expandedRev = rev3.CopyWithDocID(rev3.GetDocId(), rev3.GetRevId());
    Assert.IsTrue(database.ExpandAttachments(expandedRev, 2, false, true, status));
    attachmentDict = new Dictionary<string, object> { { testAttachmentName, new Dictionary<string, object> {
        { "content_type", "text/html" },
        { "data", "PGh0bWw+QW5kIHRoaXMgaXMgYXR0YWNoMjwvaHRtbD4=" },
        { "digest", "sha1-s14XRTXlwvzYfjo1t1u0rjB+ZUA=" },
        { "length", 32 },
        { "revpos", 3 }
    } } };
    AssertDictionariesAreEqual(attachmentDict, expandedRev.GetAttachments());
    // Examine the attachment store:
    Assert.AreEqual(2, attachments.Count());
    Assert.AreEqual(new HashSet<BlobKey> { BlobStore.KeyForBlob(attach1), BlobStore.KeyForBlob(attach2) }, attachments.AllKeys());
    // Compaction should garbage-collect attach1, which only old revisions reference:
    database.Compact();
    Assert.AreEqual(1, attachments.Count());
    Assert.AreEqual(new HashSet<BlobKey> { BlobStore.KeyForBlob(attach2) }, attachments.AllKeys());
}
// Verifies the attachment CRUD path on Database: creating a document with an
// inline (base64) attachment, replacing the attachment via UpdateAttachment,
// error handling for stale/bogus revisions and missing attachments, and
// finally deleting the attachment.
public virtual void TestPutAttachment()
{
    const string testAttachmentName = "test_attachment";
    var attachments = database.Attachments;
    attachments.DeleteBlobs();   // start from an empty blob store
    Assert.AreEqual(0, attachments.Count());

    // Put a revision that includes an _attachments dict:
    var attach1 = Encoding.UTF8.GetBytes("This is the body of attach1");
    var base64 = Convert.ToBase64String(attach1);
    var attachment = new Dictionary<string, object>();
    attachment["content_type"] = "text/plain";
    attachment["data"] = base64;
    IDictionary<string, object> attachmentDict = new Dictionary<string, object>();
    attachmentDict[testAttachmentName] = attachment;
    var properties = new Dictionary<string, object>();
    properties["foo"] = 1;
    properties["bar"] = false;
    properties["_attachments"] = attachmentDict;
    var rev1 = database.PutRevision(new RevisionInternal(properties), null, false);

    // Examine the attachment store:
    Assert.AreEqual(1, attachments.Count());

    // Get the revision: the attachment should come back as a stub with
    // digest/length/revpos metadata instead of the inline base64 data.
    var gotRev1 = database.GetDocumentWithIDAndRev(rev1.GetDocId(), rev1.GetRevId(), DocumentContentOptions.None);
    var gotAttachmentDict = gotRev1.GetPropertyForKey("_attachments").AsDictionary<string, object>();
    var innerDict = new JObject();
    innerDict["content_type"] = "text/plain";
    innerDict["digest"] = "sha1-gOHUOBmIMoDCrMuGyaLWzf1hQTE=";
    innerDict["length"] = 27;   // byte length of "This is the body of attach1"
    innerDict["stub"] = true;
    innerDict["revpos"] = 1;
    var expectAttachmentDict = new Dictionary<string, object>();
    expectAttachmentDict[testAttachmentName] = innerDict;
    Assert.AreEqual(expectAttachmentDict, gotAttachmentDict);

    // Update the attachment directly:
    var attachv2 = Encoding.UTF8.GetBytes("Replaced body of attach");
    var writer = new BlobStoreWriter(database.Attachments);
    writer.AppendData(attachv2);
    writer.Finish();

    // Updating with no base revision ID must fail with Conflict:
    var gotExpectedErrorCode = false;
    try {
        database.UpdateAttachment(testAttachmentName, writer, "application/foo",
            AttachmentEncoding.None, rev1.GetDocId(), null);
    } catch (CouchbaseLiteException e) {
        gotExpectedErrorCode = (e.GetCBLStatus().GetCode() == StatusCode.Conflict);
    }
    Assert.IsTrue(gotExpectedErrorCode);

    // Updating against a bogus revision ID must also fail with Conflict:
    gotExpectedErrorCode = false;
    try {
        database.UpdateAttachment(testAttachmentName, new BlobStoreWriter(database.Attachments), "application/foo",
            AttachmentEncoding.None, rev1.GetDocId(), "1-bogus");
    } catch (CouchbaseLiteException e) {
        gotExpectedErrorCode = (e.GetCBLStatus().GetCode() == StatusCode.Conflict);
    }
    Assert.IsTrue(gotExpectedErrorCode);

    // Updating against the correct current revision succeeds and bumps the generation:
    gotExpectedErrorCode = false;
    RevisionInternal rev2 = null;
    try {
        rev2 = database.UpdateAttachment(testAttachmentName, writer, "application/foo",
            AttachmentEncoding.None, rev1.GetDocId(), rev1.GetRevId());
    } catch (CouchbaseLiteException) {
        gotExpectedErrorCode = true;
    }
    Assert.IsFalse(gotExpectedErrorCode);
    Assert.AreEqual(rev1.GetDocId(), rev2.GetDocId());
    Assert.AreEqual(2, rev2.GetGeneration());

    // Get the updated revision:
    RevisionInternal gotRev2 = database.GetDocumentWithIDAndRev(rev2.GetDocId(), rev2.GetRevId(), DocumentContentOptions.None);
    attachmentDict = gotRev2.GetProperties().Get("_attachments").AsDictionary<string, object>();
    innerDict = new JObject();
    innerDict["content_type"] = "application/foo";
    innerDict["digest"] = "sha1-mbT3208HI3PZgbG4zYWbDW2HsPk=";
    innerDict["length"] = 23;   // byte length of "Replaced body of attach"
    innerDict["stub"] = true;
    innerDict["revpos"] = 2;
    expectAttachmentDict[testAttachmentName] = innerDict;
    Assert.AreEqual(expectAttachmentDict, attachmentDict);

    // Delete the attachment: a nonexistent attachment on a real doc is NotFound...
    gotExpectedErrorCode = false;
    try {
        database.UpdateAttachment("nosuchattach", null, "application/foo",
            AttachmentEncoding.None, rev2.GetDocId(), rev2.GetRevId());
    } catch (CouchbaseLiteException e) {
        gotExpectedErrorCode = (e.GetCBLStatus().GetCode() == StatusCode.NotFound);
    }
    Assert.IsTrue(gotExpectedErrorCode);

    // ...and so is any attachment on a nonexistent document:
    gotExpectedErrorCode = false;
    try {
        database.UpdateAttachment("nosuchattach", null, null,
            AttachmentEncoding.None, "nosuchdoc", "nosuchrev");
    } catch (CouchbaseLiteException e) {
        gotExpectedErrorCode = (e.GetCBLStatus().GetCode() == StatusCode.NotFound);
    }
    Assert.IsTrue(gotExpectedErrorCode);

    // Passing a null body deletes the attachment, creating a third generation:
    RevisionInternal rev3 = database.UpdateAttachment(testAttachmentName, null, null,
        AttachmentEncoding.None, rev2.GetDocId(), rev2.GetRevId());
    Assert.AreEqual(rev2.GetDocId(), rev3.GetDocId());
    Assert.AreEqual(3, rev3.GetGeneration());

    // Get the updated revision: the _attachments dict should now be gone.
    RevisionInternal gotRev3 = database.GetDocumentWithIDAndRev(rev3.GetDocId(), rev3.GetRevId(), DocumentContentOptions.None);
    attachmentDict = gotRev3.GetProperties().Get("_attachments").AsDictionary<string, object>();
    Assert.IsNull(attachmentDict);
    database.Close();
}
// Handles a _revs_diff request: for each doc/revision ID pair supplied by the
// client, reports which of those revisions are missing locally, plus known
// local revisions that could serve as ancestors for the missing ones.
//
// FIX: the per-document grouping previously did a ContainsKey check followed by
// an indexer read on the same key (double lookup); it now uses TryGetValue.
public static ICouchbaseResponseState RevsDiff(ICouchbaseListenerContext context)
{
    // Collect all of the input doc/revision IDs as CBL_Revisions:
    var revs = new RevisionList();
    var body = context.BodyAs<Dictionary<string, object>>();
    if (body == null) {
        return context.CreateResponse(StatusCode.BadJson).AsDefaultState();
    }

    foreach (var docPair in body) {
        var revIDs = docPair.Value.AsList<string>();
        if (revIDs == null) {
            return context.CreateResponse(StatusCode.BadParam).AsDefaultState();
        }

        foreach (var revID in revIDs) {
            revs.Add(new RevisionInternal(docPair.Key, revID, false));
        }
    }

    return PerformLogicWithDatabase(context, true, db =>
    {
        var response = context.CreateResponse();

        // Look them up, removing the existing ones from revs:
        db.Storage.FindMissingRevisions(revs);

        // Return the missing revs in a somewhat different format:
        // { docId: { "missing": [revId, ...] }, ... }
        IDictionary<string, object> diffs = new Dictionary<string, object>();
        foreach (var rev in revs) {
            var docId = rev.GetDocId();
            IList<string> missingRevs;
            object existing;
            if (diffs.TryGetValue(docId, out existing)) {
                missingRevs = ((Dictionary<string, IList<string>>)existing)["missing"];
            } else {
                missingRevs = new List<string>();
                diffs[docId] = new Dictionary<string, IList<string>> { { "missing", missingRevs } };
            }

            missingRevs.Add(rev.GetRevId());
        }

        // Add the possible ancestors for each missing revision, probing with the
        // highest-generation missing revision of each document:
        foreach (var docPair in diffs) {
            IDictionary<string, IList<string>> docInfo = (IDictionary<string, IList<string>>)docPair.Value;
            int maxGen = 0;
            string maxRevID = null;
            foreach (var revId in docInfo["missing"]) {
                var parsed = RevisionInternal.ParseRevId(revId);
                if (parsed.Item1 > maxGen) {
                    maxGen = parsed.Item1;
                    maxRevID = revId;
                }
            }

            var rev = new RevisionInternal(docPair.Key, maxRevID, false);
            var ancestors = db.Storage.GetPossibleAncestors(rev, 0, false);
            var ancestorList = ancestors == null ? null : ancestors.ToList();
            if (ancestorList != null && ancestorList.Count > 0) {
                docInfo["possible_ancestors"] = ancestorList;
            }
        }

        response.JsonBody = new Body(diffs);
        return response;
    }).AsDefaultState();
}
// Get as many revisions as possible in one _all_docs request.
// This is compatible with CouchDB, but it only works for revs of generation 1 without attachments.
internal void PullBulkWithAllDocs(IList<RevisionInternal> bulkRevs)
{
    // http://wiki.apache.org/couchdb/HTTP_Bulk_Document_API
    Log.V(Tag, "PullBulkWithAllDocs() calling AsyncTaskStarted()");
    AsyncTaskStarted();
    ++httpConnectionCount;

    // remainingRevs tracks which requested revisions the bulk response did NOT cover.
    var remainingRevs = new List<RevisionInternal>(bulkRevs);
    var keys = bulkRevs.Select(rev => rev.GetDocId()).ToArray();
    var body = new Dictionary<string, object>();
    body.Put("keys", keys);
    SendAsyncRequest(HttpMethod.Post, "/_all_docs?include_docs=true", body, (result, e) =>
    {
        // NOTE(review): res is computed before the error check below; this relies
        // on AsDictionary tolerating a null result — confirm.
        var res = result.AsDictionary<string, object>();
        if (e != null) {
            SetLastError(e);
            RevisionFailed();
            SafeAddToCompletedChangesCount(bulkRevs.Count);
        } else {
            // Process the resulting rows' documents.
            // We only add a document if it doesn't have attachments, and if its
            // revID matches the one we asked for.
            var rows = res.Get("rows").AsList<IDictionary<string, object>>();
            Log.V(Tag, "Checking {0} bulk-fetched remote revisions", rows.Count);
            foreach (var row in rows) {
                var doc = row.Get("doc").AsDictionary<string, object>();
                if (doc != null && doc.Get("_attachments") == null) {
                    var rev = new RevisionInternal(doc);
                    var pos = remainingRevs.IndexOf(rev);
                    if (pos > -1) {
                        // Carry over the change-feed sequence from the requested revision.
                        rev.SetSequence(remainingRevs[pos].GetSequence());
                        // NOTE(review): Remove(pos) passes an int to a
                        // List<RevisionInternal>; this resolves to a Sharpen
                        // index-removal extension, not List<T>.Remove(item) — confirm.
                        remainingRevs.Remove(pos);
                        QueueDownloadedRevision(rev);
                    }
                }
            }
        }

        // Any leftover revisions that didn't get matched will be fetched individually:
        if (remainingRevs.Count > 0) {
            Log.V(Tag, "Bulk-fetch didn't work for {0} of {1} revs; getting individually", remainingRevs.Count, bulkRevs.Count);
            foreach (var rev in remainingRevs) {
                QueueRemoteRevision(rev);
            }
            PullRemoteRevisions();
        }

        // Note that we've finished this task:
        Log.V(Tag, "PullBulkWithAllDocs() calling AsyncTaskFinished()");
        AsyncTaskFinished(1);
        --httpConnectionCount;

        // Start another task if there are still revisions waiting to be pulled:
        PullRemoteRevisions();
    });
}
/// <summary>
/// Creates, updates, or deletes a document revision in the ForestDB-backed store.
/// </summary>
/// <param name="inDocId">Target document ID; a new GUID is generated when null.</param>
/// <param name="inPrevRevId">Revision being replaced, or null for a brand-new document.</param>
/// <param name="properties">New revision body; null is saved as an empty body ("{}").</param>
/// <param name="deleting">True to create a deletion (tombstone) revision.</param>
/// <param name="allowConflict">Permits inserting a child of a non-leaf revision.</param>
/// <param name="validationBlock">Optional validation callback run before insertion.</param>
/// <returns>The saved revision, or null if the transaction did not commit.</returns>
public RevisionInternal PutRevision(string inDocId, string inPrevRevId, IDictionary<string, object> properties,
    bool deleting, bool allowConflict, StoreValidation validationBlock)
{
    if (_config.HasFlag(C4DatabaseFlags.ReadOnly)) {
        throw new CouchbaseLiteException("Attempting to write to a readonly database", StatusCode.Forbidden);
    }

    // Serialize the body up front (minus reserved "_" properties).
    var json = default(string);
    if (properties != null) {
        json = Manager.GetObjectMapper().WriteValueAsString(Database.StripDocumentJSON(properties), true);
    } else {
        json = "{}";
    }

    if (inDocId == null) {
        inDocId = Misc.CreateGUID();
    }

    var putRev = default(RevisionInternal);
    var change = default(DocumentChange);
    var success = RunInTransaction(() =>
    {
        var docId = inDocId;
        var prevRevId = inPrevRevId;
        var transactionSuccess = false;
        WithC4Document(docId, null, false, true, doc =>
        {
            if (prevRevId != null) {
                // Updating an existing revision; make sure it exists and is a leaf:
                ForestDBBridge.Check(err => Native.c4doc_selectRevision(doc, prevRevId, false, err));
                if (!allowConflict && !doc->selectedRev.IsLeaf) {
                    throw new CouchbaseLiteException(StatusCode.Conflict);
                }
            } else {
                // No parent revision given:
                if (deleting) {
                    // Didn't specify a revision to delete: NotFound or a Conflict, depending
                    throw new CouchbaseLiteException(doc->Exists ? StatusCode.Conflict : StatusCode.NotFound);
                }

                // If doc exists, current rev must be in a deleted state or there will be a conflict:
                if (Native.c4doc_selectCurrentRevision(doc)) {
                    if (doc->selectedRev.IsDeleted) {
                        // New rev will be child of the tombstone:
                        prevRevId = (string)doc->revID;
                    } else {
                        throw new CouchbaseLiteException(StatusCode.Conflict);
                    }
                }
            }

            // Compute the new revID. (Can't be done earlier because prevRevID may have changed.)
            var newRevID = Delegate != null ? Delegate.GenerateRevID(Encoding.UTF8.GetBytes(json), deleting, prevRevId) : null;
            if (newRevID == null) {
                throw new CouchbaseLiteException(StatusCode.BadId);
            }

            putRev = new RevisionInternal(docId, newRevID, deleting);
            if (properties != null) {
                // Note: mutates the caller's dictionary to stamp in _id/_rev.
                properties["_id"] = docId;
                properties["_rev"] = newRevID;
                putRev.SetProperties(properties);
            }

            // Run any validation blocks:
            if (validationBlock != null) {
                var prevRev = default(RevisionInternal);
                if (prevRevId != null) {
                    prevRev = new RevisionInternal(docId, prevRevId, doc->selectedRev.IsDeleted);
                }

                var status = validationBlock(putRev, prevRev, prevRevId);
                if (status.IsError) {
                    throw new CouchbaseLiteException(String.Format("{0} failed validation", putRev), status.Code);
                }
            }

            // Add the revision to the database:
            ForestDBBridge.Check(err => Native.c4doc_insertRevision(doc, newRevID, json, deleting,
                putRev.GetAttachments() != null, allowConflict, err));
            var isWinner = SaveDocument(doc, newRevID, properties);
            putRev.SetSequence((long)doc->sequence);
            change = ChangeWithNewRevision(putRev, isWinner, doc, null);
            transactionSuccess = true;
        });

        return (transactionSuccess);
    });

    if (!success) {
        return (null);
    }

    // Notify listeners only after the transaction has committed.
    if (Delegate != null && change != null) {
        Delegate.DatabaseStorageChanged(change);
    }

    return (putRev);
}
/// <summary>Fetches the contents of a revision from the remote db, including its parent revision ID.
/// </summary>
/// <remarks>
/// Fetches the contents of a revision from the remote db, including its parent revision ID.
/// The contents are stored into rev.properties.
/// </remarks>
internal void PullRemoteRevision(RevisionInternal rev)
{
    Log.D(Tag, "PullRemoteRevision with rev: {0}", rev);
    Log.D(Tag, "PullRemoteRevision() calling AsyncTaskStarted()");
    AsyncTaskStarted();
    httpConnectionCount++;

    // Construct a query. We want the revision history, and the bodies of attachments that have
    // been added since the latest revisions we have locally.
    // See: http://wiki.apache.org/couchdb/HTTP_Document_API#Getting_Attachments_With_a_Document
    var path = new StringBuilder("/" + Uri.EscapeUriString(rev.GetDocId()) + "?rev=" + Uri.EscapeUriString(rev.GetRevId()) + "&revs=true&attachments=true");
    var knownRevs = KnownCurrentRevIDs(rev);
    if (knownRevs == null) {
        // this means something is wrong, possibly the replicator has shut down;
        // undo the task/connection bookkeeping from above and bail out.
        Log.D(Tag, "PullRemoteRevision() calling AsyncTaskFinished()");
        AsyncTaskFinished(1);
        httpConnectionCount--;
        return;
    }

    if (knownRevs.Count > 0) {
        // Tell the server which revisions we already have, so it can skip
        // sending attachment bodies that predate them.
        path.Append("&atts_since=");
        path.Append(JoinQuotedEscaped(knownRevs));
    }

    // create a final version of this variable for the log statement inside
    // FIXME find a way to avoid this
    var pathInside = path.ToString();
    SendAsyncMultipartDownloaderRequest(HttpMethod.Get, pathInside, null, LocalDatabase, (result, e) =>
    {
        try {
            // OK, now we've got the response revision:
            Log.D(Tag, "PullRemoteRevision got response for rev: " + rev);
            if (e != null) {
                Log.E(Tag, "Error pulling remote revision", e);
                SetLastError(e);
                RevisionFailed();
                Log.D(Tag, "PullRemoteRevision updating completedChangesCount from " + CompletedChangesCount + " -> " + (CompletedChangesCount + 1) + " due to error pulling remote revision");
                SafeIncrementCompletedChangesCount();
            } else {
                // Success: wrap the response in a PulledRevision, keep the
                // change-feed sequence from the originally requested revision,
                // and hand it to the insertion batcher.
                var properties = result.AsDictionary<string, object>();
                var gotRev = new PulledRevision(properties);
                gotRev.SetSequence(rev.GetSequence());
                // NOTE(review): presumably this extra AsyncTaskStarted() is
                // balanced when the batcher drains the revision — confirm.
                AsyncTaskStarted();
                Log.D(Tag, "PullRemoteRevision add rev: " + gotRev + " to batcher");
                if (downloadsToInsert != null) {
                    downloadsToInsert.QueueObject(gotRev);
                } else {
                    Log.E(Tag, "downloadsToInsert is null");
                }
            }
        } finally {
            Log.D(Tag, "PullRemoteRevision.onCompletion() calling AsyncTaskFinished()");
            AsyncTaskFinished(1);
        }

        // Note that we've finished this task; then start another one if there
        // are still revisions waiting to be pulled:
        --httpConnectionCount;
        PullRemoteRevisions();
    });
}
// Exercises Database.StubOutAttachmentsInRevBeforeRevPos over two attachments —
// "hello" (revpos 1, follows) and "goodbye" (revpos 2, inline data) — for every
// combination of minRevPos (3, 2, 1) and attachmentsFollow (false, true).
// Per the assertions below: attachments with revpos < minRevPos are replaced by
// stubs; attachments at or above minRevPos keep their body, or have it replaced
// by follows=true when attachmentsFollow is set.
public void TestStubOutAttachmentsInRevBeforeRevPos()
{
    var hello = new JObject();
    hello["revpos"] = 1;
    hello["follows"] = true;

    var goodbye = new JObject();
    goodbye["revpos"] = 2;
    goodbye["data"] = "squeee";

    var attachments = new JObject();
    attachments["hello"] = hello;
    attachments["goodbye"] = goodbye;

    var properties = new Dictionary<string, object>();
    properties["_attachments"] = attachments;

    IDictionary<string, object> expected = null;

    // minRevPos = 3, follows = false: both revpos values are below 3 -> both stubbed.
    var rev = new RevisionInternal(properties);
    Database.StubOutAttachmentsInRevBeforeRevPos(rev, 3, false);
    var checkAttachments = rev.GetProperties()["_attachments"].AsDictionary<string, object>();
    var result = (IDictionary<string, object>)checkAttachments["hello"];
    expected = new Dictionary<string, object>();
    expected["revpos"] = 1;
    expected["stub"] = true;
    AssertPropertiesAreEqual(expected, result);
    result = (IDictionary<string, object>)checkAttachments["goodbye"];
    expected = new Dictionary<string, object>();
    expected["revpos"] = 2;
    expected["stub"] = true;
    AssertPropertiesAreEqual(expected, result);

    // minRevPos = 2, follows = false: only "hello" (revpos 1) is stubbed;
    // "goodbye" keeps its inline data.
    rev = new RevisionInternal(properties);
    Database.StubOutAttachmentsInRevBeforeRevPos(rev, 2, false);
    checkAttachments = rev.GetProperties()["_attachments"].AsDictionary<string, object>();
    result = checkAttachments["hello"].AsDictionary<string, object>();
    expected = new Dictionary<string, object>();
    expected["revpos"] = 1;
    expected["stub"] = true;
    AssertPropertiesAreEqual(expected, result);
    result = checkAttachments["goodbye"].AsDictionary<string, object>();
    expected = goodbye.AsDictionary<string, object>();
    AssertPropertiesAreEqual(expected, result);

    // minRevPos = 1, follows = false: nothing is old enough to stub; both unchanged.
    rev = new RevisionInternal(properties);
    Database.StubOutAttachmentsInRevBeforeRevPos(rev, 1, false);
    checkAttachments = rev.GetProperties()["_attachments"].AsDictionary<string, object>();
    result = checkAttachments["hello"].AsDictionary<string, object>();
    expected = hello.AsDictionary<string, object>();
    AssertPropertiesAreEqual(expected, result);
    result = checkAttachments["goodbye"].AsDictionary<string, object>();
    expected = goodbye.AsDictionary<string, object>();
    AssertPropertiesAreEqual(expected, result);

    //Test the follows mode
    // minRevPos = 3, follows = true: both below 3 -> still both stubbed.
    rev = new RevisionInternal(properties);
    Database.StubOutAttachmentsInRevBeforeRevPos(rev, 3, true);
    checkAttachments = rev.GetProperties()["_attachments"].AsDictionary<string, object>();
    result = checkAttachments["hello"].AsDictionary<string, object>();
    expected = new Dictionary<string, object>();
    expected["revpos"] = 1;
    expected["stub"] = true;
    AssertPropertiesAreEqual(expected, result);
    result = checkAttachments["goodbye"].AsDictionary<string, object>();
    expected = new Dictionary<string, object>();
    expected["revpos"] = 2;
    expected["stub"] = true;
    AssertPropertiesAreEqual(expected, result);

    // minRevPos = 2, follows = true: "hello" stubbed; "goodbye" switches data -> follows.
    rev = new RevisionInternal(properties);
    Database.StubOutAttachmentsInRevBeforeRevPos(rev, 2, true);
    checkAttachments = rev.GetProperties()["_attachments"].AsDictionary<string, object>();
    result = checkAttachments["hello"].AsDictionary<string, object>();
    expected = new Dictionary<string, object>();
    expected["revpos"] = 1;
    expected["stub"] = true;
    AssertPropertiesAreEqual(expected, result);
    result = checkAttachments["goodbye"].AsDictionary<string, object>();
    expected = new Dictionary<string, object>();
    expected["revpos"] = 2;
    expected["follows"] = true;
    AssertPropertiesAreEqual(expected, result);

    // minRevPos = 1, follows = true: neither stubbed; both carry follows=true.
    rev = new RevisionInternal(properties);
    Database.StubOutAttachmentsInRevBeforeRevPos(rev, 1, true);
    checkAttachments = rev.GetProperties()["_attachments"].AsDictionary<string, object>();
    result = checkAttachments["hello"].AsDictionary<string, object>();
    expected = new Dictionary<string, object>();
    expected["revpos"] = 1;
    expected["follows"] = true;
    AssertPropertiesAreEqual(expected, result);
    result = checkAttachments["goodbye"].AsDictionary<string, object>();
    expected = new Dictionary<string, object>();
    expected["revpos"] = 2;
    expected["follows"] = true;
    AssertPropertiesAreEqual(expected, result);
}
// Builds a query result row from the raw values the storage layer produced.
// Note: _database is deliberately NOT set here. When the query runs
// asynchronously this constructor may execute on a background thread whose
// Database instance differs from the original caller's; the database property
// is filled in later, when the row is added to a QueryEnumerator.
internal QueryRow(string documentId, long sequence, object key, object value, RevisionInternal revision, IQueryRowStore storage)
{
    _key = key;
    _value = value;
    _documentRevision = revision;
    _storage = storage;
    SourceDocumentId = documentId;
    SequenceNumber = sequence;
}
// Apply the options in the URL query to the specified revision and create a new revision object.
//
// FIX: IncludeExpiration was previously OR'd in *outside* the `options & (...)`
// expression. Because `&` binds tighter than `|` in C#, the guard evaluated as
// `((options & mask) | IncludeExpiration) != 0`, which is always true (the enum
// value is non-zero), so every call rebuilt the revision even with no options
// set. IncludeExpiration now lives inside the mask.
internal static RevisionInternal ApplyOptions(DocumentContentOptions options, RevisionInternal rev, ICouchbaseListenerContext context, Database db, Status outStatus)
{
    const DocumentContentOptions handledOptions =
        DocumentContentOptions.IncludeRevs | DocumentContentOptions.IncludeRevsInfo |
        DocumentContentOptions.IncludeConflicts | DocumentContentOptions.IncludeAttachments |
        DocumentContentOptions.IncludeLocalSeq | DocumentContentOptions.IncludeExpiration;
    if ((options & handledOptions) != 0) {
        var dst = rev.GetProperties() ?? new Dictionary<string, object>();

        // _local_seq: the revision's sequence number in this local database.
        if (options.HasFlag(DocumentContentOptions.IncludeLocalSeq)) {
            dst["_local_seq"] = rev.Sequence;
        }

        // _revisions: the revision history in compressed dictionary form.
        if (options.HasFlag(DocumentContentOptions.IncludeRevs)) {
            var revs = db.GetRevisionHistory(rev, null);
            dst["_revisions"] = TreeRevisionID.MakeRevisionHistoryDict(revs);
        }

        // _revs_info: the full history annotated with each revision's availability.
        if (options.HasFlag(DocumentContentOptions.IncludeRevsInfo)) {
            dst["_revs_info"] = db.GetRevisionHistory(rev, null).Select(x =>
            {
                string status = "available";
                var ancestor = db.GetDocument(rev.DocID, x, true);
                if (ancestor.Deleted) {
                    status = "deleted";
                } else if (ancestor.Missing) {
                    status = "missing";
                }

                return new Dictionary<string, object> {
                    { "rev", x.ToString() },
                    { "status", status }
                };
            });
        }

        // _conflicts: other non-deleted leaf revisions, when the doc is conflicted.
        if (options.HasFlag(DocumentContentOptions.IncludeConflicts)) {
            RevisionList revs = db.Storage.GetAllDocumentRevisions(rev.DocID, true, false);
            if (revs.Count > 1) {
                dst["_conflicts"] = from r in revs
                                    where !r.Equals(rev) && !r.Deleted
                                    select r.RevID.ToString();
            }
        }

        // _exp: the document's expiration time, when one has been set.
        if (options.HasFlag(DocumentContentOptions.IncludeExpiration)) {
            var expirationTime = db.Storage?.GetDocumentExpiration(rev.DocID);
            if (expirationTime.HasValue) {
                dst["_exp"] = expirationTime;
            }
        }

        RevisionInternal nuRev = new RevisionInternal(dst);

        // Inline the attachment bodies (optionally annotated with encoding info):
        if (options.HasFlag(DocumentContentOptions.IncludeAttachments)) {
            bool attEncodingInfo = context != null && context.GetQueryParam<bool>("att_encoding_info", bool.TryParse, false);
            db.ExpandAttachments(nuRev, 0, false, !attEncodingInfo);
        }

        rev = nuRev;
    }

    return rev;
}
// Uploads a revision as a MIME multipart/related PUT: the first part is the
// revision JSON, and each attachment marked "follows" is appended as its own
// part streamed from the local blob store. Returns false when no attachment
// needs multipart treatment (or a blob is missing), so the caller can fall
// back to a plain JSON upload.
private bool UploadMultipartRevision(RevisionInternal revision)
{
    MultipartContent multiPart = null;
    var length = default(double);   // approximate payload size, for the debug log only
    var revProps = revision.GetProperties();

    var attachments = revProps.Get("_attachments").AsDictionary<string, object>();
    foreach (var attachmentKey in attachments.Keys) {
        var attachment = attachments.Get(attachmentKey).AsDictionary<string, object>();
        if (attachment.ContainsKey("follows")) {
            // Lazily create the container and add the JSON body as the first part:
            if (multiPart == null) {
                multiPart = new MultipartContent("related");
                try {
                    var json = Manager.GetObjectMapper().WriteValueAsString(revProps);
                    var utf8charset = Encoding.UTF8;
                    //multiPart.Add(new StringContent(json, utf8charset, "application/json"), "param1");
                    var jsonContent = new StringContent(json, utf8charset, "application/json");
                    //jsonContent.Headers.ContentDisposition = new ContentDispositionHeaderValue("attachment");
                    multiPart.Add(jsonContent);
                    // NOTE(review): json.Length counts chars, not UTF-8 bytes; since
                    // `length` only feeds the log below, the discrepancy is benign.
                    length += json.Length;
                } catch (Exception e) {
                    throw Misc.CreateExceptionAndLog(Log.To.Sync, e, TAG, "Not able to serialize revision properties into a multipart request content.");
                }
            }

            var blobStore = LocalDatabase.Attachments;
            var base64Digest = (string)attachment.Get("digest");
            var blobKey = new BlobKey(base64Digest);
            var inputStream = blobStore.BlobStreamForKey(blobKey);
            if (inputStream == null) {
                // Missing blob: abandon the multipart upload.
                // NOTE(review): if another "follows" attachment occurs later in the
                // loop, multiPart is recreated and earlier parts are lost — looks
                // suspicious; confirm this is intended.
                Log.To.Sync.W(TAG, "Unable to find blob file for blobKey: {0} - Skipping upload of multipart revision.", blobKey);
                multiPart = null;
                length = 0;
            } else {
                string contentType = null;
                if (attachment.ContainsKey("content_type")) {
                    contentType = (string)attachment.Get("content_type");
                } else {
                    if (attachment.ContainsKey("content-type")) {
                        // Wrong key style observed in old data; warn but don't rewrite it.
                        var message = string.Format("Found attachment that uses content-type" + " field name instead of content_type (see couchbase-lite-android" + " issue #80): " + attachment);
                        Log.To.Sync.W(TAG, message);
                    }
                }

                var content = new StreamContent(inputStream);
                content.Headers.ContentDisposition = new ContentDispositionHeaderValue("attachment") { FileName = attachmentKey };
                content.Headers.ContentType = new MediaTypeHeaderValue(contentType ?? "application/octet-stream");
                multiPart.Add(content);
                length += inputStream.Length;
            }
        }
    }

    // Nothing required multipart treatment; tell the caller to send plain JSON.
    if (multiPart == null) {
        return (false);
    }

    var path = string.Format("/{0}?new_edits=false", revision.DocID);

    // TODO: need to throttle these requests
    Log.To.Sync.D(TAG, "{0} uploading multipart request. Revision: {1}", this, revision);
    SafeAddToChangesCount(1);
    SendAsyncMultipartRequest(HttpMethod.Put, path, multiPart, (result, e) =>
    {
        if (e != null) {
            var httpError = Misc.Flatten(e) as HttpResponseException;
            if (httpError != null) {
                // 415 means the server can't accept multipart; remember that and
                // retry this revision as a plain JSON request.
                if (httpError.StatusCode == System.Net.HttpStatusCode.UnsupportedMediaType) {
                    _dontSendMultipart = true;
                    UploadJsonRevision(revision);
                }
            } else {
                LastError = e;
                RevisionFailed();
            }
        } else {
            Log.To.Sync.V(TAG, "{0} sent multipart {1}", this, revision);
            SafeIncrementCompletedChangesCount();
            RemovePending(revision);
        }
    });

    Log.To.Sync.V(TAG, "{0} queuing revision (multipart, {1}kb)", this, length / 1024.0);
    return (true);
}
// Migrates one document — and its whole revision tree — from a pre-upgrade
// SQLite database into the current store. Leaf (current) revisions are
// force-inserted together with their reconstructed history; non-leaf rows are
// only recorded in `tree` so a later leaf can walk its ancestry.
private Status ImportDoc(string docID, long docNumericID)
{
    // Schema of the source table being read:
    // CREATE TABLE revs (
    //    sequence INTEGER PRIMARY KEY AUTOINCREMENT,
    //    doc_id INTEGER NOT NULL REFERENCES docs(doc_id) ON DELETE CASCADE,
    //    revid TEXT NOT NULL COLLATE REVID,
    //    parent INTEGER REFERENCES revs(sequence) ON DELETE SET NULL,
    //    current BOOLEAN,
    //    deleted BOOLEAN DEFAULT 0,
    //    json BLOB,
    //    no_attachments BOOLEAN,
    //    UNIQUE (doc_id, revid) );
    sqlite3_stmt revQuery = null;
    Status status = PrepareSQL(ref revQuery, "SELECT sequence, revid, parent, current, deleted, json" + " FROM revs WHERE doc_id=? ORDER BY sequence");
    if (status.IsError) {
        return (status);
    }

    raw.sqlite3_bind_int64(revQuery, 1, docNumericID);

    // Maps sequence -> [revID, parentSequence] for rows seen so far, so leaf
    // histories can be walked backwards without re-querying.
    var tree = new Dictionary<long, IList<object>>();

    int err;
    while (raw.SQLITE_ROW == (err = raw.sqlite3_step(revQuery))) {
        long sequence = raw.sqlite3_column_int64(revQuery, 0);
        string revID = raw.sqlite3_column_text(revQuery, 1);
        long parentSeq = raw.sqlite3_column_int64(revQuery, 2);
        bool current = raw.sqlite3_column_int(revQuery, 3) != 0;
        if (current) {
            // Add a leaf revision:
            bool deleted = raw.sqlite3_column_int(revQuery, 4) != 0;
            IEnumerable<byte> json = raw.sqlite3_column_blob(revQuery, 5);
            if (json == null) {
                json = Encoding.UTF8.GetBytes("{}");
            }

            // Merge attachment metadata for this sequence into the body:
            var nuJson = new List<byte>(json);
            status = AddAttachmentsToSequence(sequence, nuJson);
            if (status.IsError) {
                raw.sqlite3_finalize(revQuery);
                return (status);
            }

            json = nuJson;
            RevisionInternal rev = new RevisionInternal(docID, revID, deleted);
            rev.SetJson(json);

            // Walk parent links; rows are scanned in ascending sequence order,
            // so ancestors should already be in `tree` (the assert guards this).
            var history = new List<string>();
            history.Add(revID);
            while (parentSeq > 0) {
                var ancestor = tree.Get(parentSeq);
                Debug.Assert(ancestor != null, String.Format("Couldn't find parent sequence of {0} (doc {1})", parentSeq, docID));
                history.Add((string)ancestor[0]);
                parentSeq = (long)ancestor[1];
            }

            Log.D(TAG, "Upgrading doc {0} history {1}", rev, Manager.GetObjectMapper().WriteValueAsString(history));
            try {
                _db.ForceInsert(rev, history, null);
            } catch (CouchbaseLiteException e) {
                status = e.CBLStatus;
            }

            if (status.IsError) {
                raw.sqlite3_finalize(revQuery);
                return (status);
            }

            NumRevs++;
        } else {
            // Non-leaf row: remember revID and parent link for later history walks.
            tree[sequence] = new List<object> { revID, parentSeq };
        }
    }

    raw.sqlite3_finalize(revQuery);
    ++NumDocs;
    // SQLITE_DONE maps to success; anything else becomes an error status.
    return (SqliteErrToStatus(err));
}
// Incrementally updates this view's index inside a single transaction:
//  1. deletes map rows made obsolete by replaced revisions,
//  2. runs the user Map function over every winning revision added since the
//     last indexing pass (skipping design docs and losing conflicts),
//  3. records the new last-indexed sequence.
//
// FIX: the computed `contentOptions` (which carries NoAttachments when the
// revision row reports no attachments) was a dead local — the call to
// DocumentPropertiesFromJSON passed DocumentContentOptions.None instead.
// It is now actually passed through, so attachment data is not loaded for
// revisions that have none.
internal void UpdateIndex()
{
    Log.I(Database.Tag, "Re-indexing view {0} ...", Name);
    System.Diagnostics.Debug.Assert((Map != null));

    if (Id <= 0) {
        throw new CouchbaseLiteException("View.Id <= 0", new Status(StatusCode.NotFound));
    }

    var result = new Status(StatusCode.InternalServerError);
    Cursor cursor = null;
    Cursor cursor2 = null;
    try {
        Database.RunInTransaction(() =>
        {
            var lastSequence = LastSequenceIndexed;
            var dbMaxSequence = Database.LastSequenceNumber;
            if (lastSequence >= dbMaxSequence) {
                // nothing to do (eg, kCBLStatusNotModified)
                Log.V(Database.Tag, "lastSequence ({0}) == dbMaxSequence ({1}), nothing to do", lastSequence, dbMaxSequence);
                result.SetCode(StatusCode.NotModified);
                return false;
            }

            // First remove obsolete emitted results from the 'maps' table:
            var sequence = lastSequence;
            if (lastSequence < 0) {
                var msg = string.Format("lastSequence < 0 ({0})", lastSequence);
                throw new CouchbaseLiteException(msg, new Status(StatusCode.InternalServerError));
            }

            if (lastSequence == 0) {
                // If the lastSequence has been reset to 0, make sure to remove
                // any leftover rows:
                var whereArgs = new string[] { Id.ToString() };
                Database.StorageEngine.Delete("maps", "view_id=?", whereArgs);
            } else {
                Database.OptimizeSQLIndexes();
                // Delete all obsolete map results (ones from since-replaced revisions):
                var args = new[] { Id.ToString(), lastSequence.ToString(), lastSequence.ToString() };
                Database.StorageEngine.ExecSQL(
                    "DELETE FROM maps WHERE view_id=? AND sequence IN (" +
                    "SELECT parent FROM revs WHERE sequence>? " +
                    "AND +parent>0 AND +parent<=?)", args);
            }

            // Count how many rows the DELETE above removed (log-only):
            var deleted = 0;
            cursor = Database.StorageEngine.IntransactionRawQuery("SELECT changes()");
            cursor.MoveToNext();
            deleted = cursor.GetInt(0);
            cursor.Close();

            // Now scan every revision added since the last time the view was indexed:
            var selectArgs = new[] { lastSequence.ToString(), dbMaxSequence.ToString() };
            cursor = Database.StorageEngine.IntransactionRawQuery(
                "SELECT revs.doc_id, sequence, docid, revid, json, no_attachments FROM revs, docs " +
                "WHERE sequence>? AND sequence<=? AND current!=0 AND deleted=0 " +
                "AND revs.doc_id = docs.doc_id " +
                "ORDER BY revs.doc_id, revid DESC", selectArgs);

            var lastDocID = 0L;
            var keepGoing = cursor.MoveToNext();
            while (keepGoing) {
                long docID = cursor.GetLong(0);
                if (docID != lastDocID) {
                    // Only look at the first-iterated revision of any document,
                    // because this is the one with the highest revid, hence the
                    // "winning" revision of a conflict.
                    lastDocID = docID;

                    // Reconstitute the document as a dictionary:
                    sequence = cursor.GetLong(1);
                    string docId = cursor.GetString(2);
                    if (docId.StartsWith("_design/", StringComparison.InvariantCultureIgnoreCase)) {
                        // design docs don't get indexed!
                        keepGoing = cursor.MoveToNext();
                        continue;
                    }

                    var revId = cursor.GetString(3);
                    var json = cursor.GetBlob(4);
                    var noAttachments = cursor.GetInt(5) > 0;

                    // Skip rows with the same doc_id -- these are losing conflicts.
                    while ((keepGoing = cursor.MoveToNext()) && cursor.GetLong(0) == docID) {
                    }

                    if (lastSequence > 0) {
                        // Find conflicts with documents from previous indexings.
                        var selectArgs2 = new[] { Convert.ToString(docID), Convert.ToString(lastSequence) };
                        cursor2 = Database.StorageEngine.IntransactionRawQuery(
                            "SELECT revid, sequence FROM revs " +
                            "WHERE doc_id=? AND sequence<=? AND current!=0 AND deleted=0 " +
                            "ORDER BY revID DESC " +
                            "LIMIT 1", selectArgs2);
                        if (cursor2.MoveToNext()) {
                            var oldRevId = cursor2.GetString(0);
                            // This is the revision that used to be the 'winner'.
                            // Remove its emitted rows:
                            var oldSequence = cursor2.GetLong(1);
                            var args = new[] { Sharpen.Extensions.ToString(Id), Convert.ToString(oldSequence) };
                            Database.StorageEngine.ExecSQL("DELETE FROM maps WHERE view_id=? AND sequence=?", args);
                            if (RevisionInternal.CBLCompareRevIDs(oldRevId, revId) > 0) {
                                // It still 'wins' the conflict, so it's the one that
                                // should be mapped [again], not the current revision!
                                revId = oldRevId;
                                sequence = oldSequence;
                                var selectArgs3 = new[] { Convert.ToString(sequence) };
                                json = Misc.ByteArrayResultForQuery(
                                    Database.StorageEngine,
                                    "SELECT json FROM revs WHERE sequence=?",
                                    selectArgs3
                                );
                            }
                        }

                        cursor2.Close();
                        cursor2 = null;
                    }

                    // Get the document properties, to pass to the map function:
                    var contentOptions = DocumentContentOptions.None;
                    if (noAttachments) {
                        contentOptions |= DocumentContentOptions.NoAttachments;
                    }

                    var properties = Database.DocumentPropertiesFromJSON(
                        json, docId, revId, false, sequence, contentOptions
                    );
                    if (properties != null) {
                        // Call the user-defined map() to emit new key/value pairs
                        // from this revision. emitBlock is the emit() callback the
                        // map function invokes for each pair:
                        var enclosingView = this;
                        var thisSequence = sequence;
                        var map = Map;
                        if (map == null) {
                            throw new CouchbaseLiteException("Map function is missing.");
                        }

                        EmitDelegate emitBlock = (key, value) =>
                        {
                            // TODO: Do we need to do any null checks on key or value?
                            try {
                                var keyJson = Manager.GetObjectMapper().WriteValueAsString(key);
                                var valueJson = value == null ? null : Manager.GetObjectMapper().WriteValueAsString(value);
                                var insertValues = new ContentValues();
                                insertValues.Put("view_id", enclosingView.Id);
                                insertValues["sequence"] = thisSequence;
                                insertValues["key"] = keyJson;
                                insertValues["value"] = valueJson;
                                enclosingView.Database.StorageEngine.Insert("maps", null, insertValues);
                            } catch (Exception e) {
                                // Emit failures are logged but do not abort indexing.
                                Log.E(Database.Tag, "Error emitting", e);
                            }
                        };

                        map(properties, emitBlock);
                    }
                } else {
                    keepGoing = cursor.MoveToNext();
                }
            }

            // Finally, record the last revision sequence number that was indexed:
            var updateValues = new ContentValues();
            updateValues["lastSequence"] = dbMaxSequence;
            var whereArgs_1 = new string[] { Id.ToString() };
            Database.StorageEngine.Update("views", updateValues, "view_id=?", whereArgs_1);

            // FIXME actually count number added :)
            Log.V(Database.Tag, "...Finished re-indexing view {0} up to sequence {1} (deleted {2} added ?)", Name, Convert.ToString(dbMaxSequence), deleted);
            result.SetCode(StatusCode.Ok);
            return true;
        });
    } catch (Exception e) {
        throw new CouchbaseLiteException(e, new Status(StatusCode.DbError));
    } finally {
        if (cursor2 != null) {
            cursor2.Close();
        }
        if (cursor != null) {
            cursor.Close();
        }
        if (!result.IsSuccessful) {
            Log.W(Database.Tag, "Failed to rebuild view {0}:{1}", Name, result.GetCode());
        }
    }
}
/// <summary>
/// Creates a validation context pairing the revision being saved
/// (<paramref name="newRevision"/>) with the revision it would replace
/// (<paramref name="currentRevision"/>) in the given database.
/// </summary>
internal ValidationContext(Database database, RevisionInternal currentRevision, RevisionInternal newRevision)
{
    NewRevision = newRevision;
    InternalRevision = currentRevision;
    Database = database;
}
/// <summary>
/// Exercises the local-document (_local/) CRUD cycle: create, read back, update,
/// conflict on stale revision, delete, and NotFound on a nonexistent document.
/// </summary>
public void TestLocalDocs()
{
    // Create a local document.
    var documentProperties = new Dictionary<string, object>();
    documentProperties["_id"] = "_local/doc1";
    documentProperties["foo"] = 1;
    documentProperties["bar"] = false;
    var body = new Body(documentProperties);
    var rev1 = new RevisionInternal(body);
    rev1 = database.PutLocalRevision(rev1, null);
    Log.V(Tag, "Created " + rev1);
    Assert.AreEqual("_local/doc1", rev1.GetDocId());
    Assert.IsTrue(rev1.GetRevId().StartsWith("1-"));

    // Read it back and check that the stored properties round-trip.
    var readRev = database.GetLocalDocument(rev1.GetDocId(), null);
    Assert.IsNotNull(readRev);
    var readRevProps = readRev.GetProperties();
    Assert.AreEqual(rev1.GetDocId(), readRevProps.Get("_id"));
    Assert.AreEqual(rev1.GetRevId(), readRevProps.Get("_rev"));
    AssertPropertiesAreEqual(UserProperties(readRevProps), UserProperties(body.GetProperties()));

    // Now update it; the revision generation should advance to "2-".
    documentProperties = (Dictionary<string, object>)readRev.GetProperties();
    documentProperties["status"] = "updated!";
    body = new Body(documentProperties);
    var rev2 = new RevisionInternal(body);
    var rev2input = rev2;
    rev2 = database.PutLocalRevision(rev2, rev1.GetRevId());
    // Fixed: previously logged rev1 (the pre-update revision) here.
    Log.V(Tag, "Updated " + rev2);
    Assert.AreEqual(rev1.GetDocId(), rev2.GetDocId());
    Assert.IsTrue(rev2.GetRevId().StartsWith("2-"));

    // Read the update back.
    readRev = database.GetLocalDocument(rev2.GetDocId(), null);
    Assert.IsNotNull(readRev);
    AssertPropertiesAreEqual(UserProperties(readRev.GetProperties()), UserProperties(body.GetProperties()));

    // Try to update the first rev, which should fail with Conflict:
    var gotException = false;
    try
    {
        database.PutLocalRevision(rev2input, rev1.GetRevId());
    }
    catch (CouchbaseLiteException e)
    {
        Assert.AreEqual(StatusCode.Conflict, e.GetCBLStatus().GetCode());
        gotException = true;
    }
    Assert.IsTrue(gotException);

    // Deleting without the current revision ID must also conflict:
    var revD = new RevisionInternal(rev2.GetDocId(), null, true);
    gotException = false;
    try
    {
        var revResult = database.PutLocalRevision(revD, null);
        Assert.IsNull(revResult);
    }
    catch (CouchbaseLiteException e)
    {
        Assert.AreEqual(StatusCode.Conflict, e.GetCBLStatus().GetCode());
        gotException = true;
    }
    Assert.IsTrue(gotException);

    // Deleting with the current revision ID succeeds:
    revD = database.PutLocalRevision(revD, rev2.GetRevId());

    // Delete nonexistent doc — expect NotFound:
    gotException = false;
    var revFake = new RevisionInternal("_local/fake", null, true);
    try
    {
        database.PutLocalRevision(revFake, null);
    }
    catch (CouchbaseLiteException e)
    {
        Assert.AreEqual(StatusCode.NotFound, e.GetCBLStatus().GetCode());
        gotException = true;
    }
    Assert.IsTrue(gotException);

    // Read the deleted doc back (should fail):
    readRev = database.GetLocalDocument(revD.GetDocId(), null);
    Assert.IsNull(readRev);
}
/// <summary>
/// Constructor. Looks up the owning Document via the revision's document ID
/// (passing a null ID — and hence a null document — when <paramref name="revision"/>
/// is null) and delegates to the (Document, RevisionInternal) constructor.
/// </summary>
/// <param name="database">Database used to resolve the revision's document.</param>
/// <param name="revision">The internal revision this saved revision wraps.</param>
internal SavedRevision(Database database, RevisionInternal revision) : this(database.GetDocument(revision == null ? null : revision.DocID), revision) { }
/// <summary>
/// Create and update multiple documents at the same time within a single request.
/// </summary>
/// <returns>The response state for further HTTP processing</returns>
/// <param name="context">The context of the Couchbase Lite HTTP request</param>
/// <remarks>
/// http://docs.couchdb.org/en/latest/api/database/bulk-api.html#post--db-_bulk_docs
/// </remarks>
public static ICouchbaseResponseState ProcessDocumentChangeOperations(ICouchbaseListenerContext context)
{
    return(PerformLogicWithDatabase(context, true, db =>
    {
        // Body must be a JSON object with a "docs" array.
        var postBody = context.BodyAs<Dictionary<string, object>>();
        if (postBody == null) {
            return context.CreateResponse(StatusCode.BadJson);
        }

        if (!postBody.ContainsKey("docs")) {
            return context.CreateResponse(StatusCode.BadParam);
        }
        var docs = postBody["docs"].AsList<IDictionary<string, object>>();

        // Optional flags; TryGetValue leaves them false when absent.
        bool allOrNothing;
        postBody.TryGetValue<bool>("all_or_nothing", out allOrNothing);

        bool newEdits;
        postBody.TryGetValue<bool>("new_edits", out newEdits);

        var response = context.CreateResponse();
        // Shared with the transaction lambda below; holds the last per-doc status
        // so a failed transaction can surface it on the response.
        StatusCode status = StatusCode.Ok;
        bool success = db.RunInTransaction(() =>
        {
            // One result entry per input document (errors included, unless aborted).
            List<IDictionary<string, object>> results = new List<IDictionary<string, object>>(docs.Count);
            foreach (var doc in docs) {
                string docId = doc.GetCast<string>("_id");
                RevisionInternal rev = null;
                Body body = new Body(doc);

                if (!newEdits) {
                    // new_edits=false: insert the revision with its given history verbatim.
                    if (!RevisionInternal.IsValid(body)) {
                        status = StatusCode.BadParam;
                    } else {
                        rev = new RevisionInternal(body);
                        var history = Database.ParseCouchDBRevisionHistory(doc);
                        try {
                            db.ForceInsert(rev, history, null);
                        } catch (CouchbaseLiteException e) {
                            status = e.Code;
                        }
                    }
                } else {
                    // Normal edit: create/update the document, generating a new rev ID.
                    status = DocumentMethods.UpdateDocument(context, db, docId, body, false, allOrNothing, out rev);
                }

                IDictionary<string, object> result = null;
                if ((int)status < 300) {
                    Debug.Assert(rev != null && rev.GetRevId() != null);
                    if (newEdits) {
                        result = new Dictionary<string, object>
                        {
                            { "id", rev.GetDocId() },
                            { "rev", rev.GetRevId() },
                            { "status", (int)status }
                        };
                    }
                } else if ((int)status >= 500) {
                    return false; // abort the whole thing if something goes badly wrong
                } else if (allOrNothing) {
                    return false; // all_or_nothing backs out if there's any error
                } else {
                    // Per-doc failure: report it in the results array and keep going.
                    var info = Status.ToHttpStatus(status);
                    result = new Dictionary<string, object>
                    {
                        { "id", docId },
                        { "error", info.Item2 },
                        { "status", info.Item1 }
                    };
                }

                if (result != null) {
                    results.Add(result);
                }
            }

            response.JsonBody = new Body(results.Cast<object>().ToList());
            return true;
        });

        if (!success) {
            // Transaction rolled back; report the status that triggered the abort.
            response.InternalStatus = status;
        }

        return response;
    }).AsDefaultState());
}
/// <summary>
/// Runs a non-reduced (regular) query against the view index, producing one
/// QueryRow per emitted index row, optionally loading document bodies and
/// applying a custom post-filter with its own skip/limit accounting.
/// </summary>
/// <param name="options">Query options; Filter, Limit, Skip, IncludeDocs and Keys are honored here.</param>
/// <returns>The matching rows, reordered to follow options.Keys when given.</returns>
public IEnumerable<QueryRow> RegularQuery(QueryOptions options)
{
    var db = _dbStorage;
    var filter = options.Filter;
    int limit = int.MaxValue;
    int skip = 0;
    if (filter != null) {
        // Custom post-filter means skip/limit apply to the filtered rows, not to the
        // underlying query, so handle them specially:
        limit = options.Limit;
        skip = options.Skip;
        // NOTE(review): options is mutated here and not restored afterwards — confirm
        // callers do not reuse the same QueryOptions instance.
        options.Limit = QueryOptions.DEFAULT_LIMIT;
        options.Skip = 0;
    }

    var rows = new List<QueryRow>();
    RunQuery(options, (keyData, valueData, docId, cursor) =>
    {
        long sequence = cursor.GetLong(3);
        RevisionInternal docRevision = null;
        if (options.IncludeDocs) {
            IDictionary<string, object> value = null;
            if (valueData != null && !RowValueIsEntireDoc(valueData.Value)) {
                value = valueData.Value.AsDictionary<string, object>();
            }

            string linkedId = value == null ? null : value.GetCast<string>("_id");
            if (linkedId != null) {
                // Linked document: http://wiki.apache.org/couchdb/Introduction_to_CouchDB_views#Linked_documents
                string linkedRev = value == null ? null : value.GetCast<string>("_rev"); //usually null
                docRevision = db.GetDocument(linkedId, linkedRev, true);
                sequence = docRevision == null ? 0 : docRevision.GetSequence();
            } else {
                // Load the emitting document itself from the cursor columns.
                docRevision = db.GetRevision(docId, cursor.GetString(4), false, sequence, cursor.GetBlob(5));
            }
        }

        Log.V(TAG, "Query {0}: Found row with key={1}, value={2}, id={3}",
            Name, keyData.Value, valueData.Value, docId);
        QueryRow row = null;
        if (false) {
            //TODO: bbox
            // NOTE(review): dead placeholder branch for bounding-box (geo) queries;
            // harmless but should be removed or implemented.
        } else {
            row = new QueryRow(docId, sequence, keyData.Value, valueData.Value, docRevision, this);
        }

        if (filter != null) {
            if (!filter(row)) {
                return(new Status(StatusCode.Ok));
            }

            if (skip > 0) {
                --skip;
                return(new Status(StatusCode.Ok));
            }
        }

        rows.Add(row);
        // NOTE(review): this post-decrement check runs AFTER the row is added, so a
        // post-filter limit of N appears to let N+1 rows through (and limit 0 lets 1
        // through) — confirm against the reference implementation before changing.
        if (limit-- == 0) {
            return(new Status(StatusCode.Reserved));
        }

        return(new Status(StatusCode.Ok));
    });

    // If given keys, sort the output into that order, and add entries for missing keys:
    if (options.Keys != null) {
        // Group rows by key:
        var rowsByKey = new Dictionary<string, List<QueryRow>>();
        foreach (var row in rows) {
            var key = ToJSONString(row.Key);
            var dictRows = rowsByKey.Get(key);
            if (dictRows == null) {
                dictRows = rowsByKey[key] = new List<QueryRow>();
            }

            dictRows.Add(row);
        }

        // Now concatenate them in the order the keys are given in options:
        var sortedRows = new List<QueryRow>();
        foreach (var key in options.Keys.Select(x => ToJSONString(x))) {
            var dictRows = rowsByKey.Get(key);
            if (dictRows != null) {
                sortedRows.AddRange(dictRows);
            }
        }

        rows = sortedRows;
    }

    return(rows);
}
/// <summary>
/// Updates the view's persistent index: deletes obsolete 'maps' rows, then re-runs
/// the user-defined map function over every winning revision added since the last
/// indexed sequence, and records the new last-indexed sequence.
/// </summary>
/// <exception cref="CouchbaseLiteException">
/// NotFound when the view has no id; InternalServerError when the stored last
/// sequence is negative; DbError on SQL failure.
/// </exception>
public void UpdateIndex()
{
    Log.V(Log.TagView, "Re-indexing view: %s", name);
    System.Diagnostics.Debug.Assert((mapBlock != null));
    if (GetViewId() <= 0)
    {
        // Fixed: message used to say "< 0", contradicting the <= 0 check, and was
        // wrapped in a no-arg string.Format.
        string msg = "getViewId() <= 0";
        throw new CouchbaseLiteException(msg, new Status(Status.NotFound));
    }
    database.BeginTransaction();
    Status result = new Status(Status.InternalServerError);
    Cursor cursor = null;
    // Declared at method scope so the finally block can close it if an exception
    // escapes mid-loop (it was previously never closed at all — a cursor leak).
    Cursor cursor2 = null;
    try
    {
        long lastSequence = GetLastSequenceIndexed();
        long dbMaxSequence = database.GetLastSequenceNumber();
        if (lastSequence == dbMaxSequence)
        {
            // nothing to do (eg, kCBLStatusNotModified)
            Log.V(Log.TagView, "lastSequence (%s) == dbMaxSequence (%s), nothing to do",
                lastSequence, dbMaxSequence);
            result.SetCode(Status.NotModified);
            return;
        }
        // First remove obsolete emitted results from the 'maps' table:
        long sequence = lastSequence;
        if (lastSequence < 0)
        {
            // Fixed: used Java-style "%s", which .NET string.Format leaves unsubstituted.
            string msg = string.Format("lastSequence < 0 ({0})", lastSequence);
            throw new CouchbaseLiteException(msg, new Status(Status.InternalServerError));
        }
        if (lastSequence == 0)
        {
            // If the lastSequence has been reset to 0, make sure to remove
            // any leftover rows:
            string[] whereArgs = new string[] { Sharpen.Extensions.ToString(GetViewId()) };
            database.GetDatabase().Delete("maps", "view_id=?", whereArgs);
        }
        else
        {
            // Delete all obsolete map results (ones from since-replaced revisions):
            string[] args = new string[] { Sharpen.Extensions.ToString(GetViewId()),
                System.Convert.ToString(lastSequence), System.Convert.ToString(lastSequence) };
            database.GetDatabase().ExecSQL("DELETE FROM maps WHERE view_id=? AND sequence IN ("
                + "SELECT parent FROM revs WHERE sequence>? " + "AND parent>0 AND parent<=?)", args);
        }
        int deleted = 0;
        cursor = database.GetDatabase().RawQuery("SELECT changes()", null);
        cursor.MoveToNext();
        deleted = cursor.GetInt(0);
        cursor.Close();
        // This is the emit() block, which gets called from within the
        // user-defined map() block that's called down below.
        AbstractTouchMapEmitBlock emitBlock = new _AbstractTouchMapEmitBlock_428(this);
        // Now scan every revision added since the last time the view was indexed:
        string[] selectArgs = new string[] { System.Convert.ToString(lastSequence) };
        cursor = database.GetDatabase().RawQuery(
            "SELECT revs.doc_id, sequence, docid, revid, json, no_attachments FROM revs, docs "
            + "WHERE sequence>? AND current!=0 AND deleted=0 " + "AND revs.doc_id = docs.doc_id "
            + "ORDER BY revs.doc_id, revid DESC", selectArgs);
        long lastDocID = 0;
        bool keepGoing = cursor.MoveToNext();
        while (keepGoing)
        {
            long docID = cursor.GetLong(0);
            if (docID != lastDocID)
            {
                // Only look at the first-iterated revision of any document, because
                // this is the one with the highest revid, hence the "winning"
                // revision of a conflict.
                lastDocID = docID;
                // Reconstitute the document as a dictionary:
                sequence = cursor.GetLong(1);
                string docId = cursor.GetString(2);
                if (docId.StartsWith("_design/"))
                {
                    // design docs don't get indexed!
                    keepGoing = cursor.MoveToNext();
                    continue;
                }
                string revId = cursor.GetString(3);
                byte[] json = cursor.GetBlob(4);
                bool noAttachments = cursor.GetInt(5) > 0;
                // Skip rows with the same doc_id -- these are losing conflicts.
                while ((keepGoing = cursor.MoveToNext()) && cursor.GetLong(0) == docID)
                {
                }
                if (lastSequence > 0)
                {
                    // Find conflicts with documents from previous indexings.
                    string[] selectArgs2 = new string[] { System.Convert.ToString(docID),
                        System.Convert.ToString(lastSequence) };
                    cursor2 = database.GetDatabase().RawQuery("SELECT revid, sequence FROM revs "
                        + "WHERE doc_id=? AND sequence<=? AND current!=0 AND deleted=0 "
                        + "ORDER BY revID DESC " + "LIMIT 1", selectArgs2);
                    if (cursor2.MoveToNext())
                    {
                        string oldRevId = cursor2.GetString(0);
                        // This is the revision that used to be the 'winner'.
                        // Remove its emitted rows:
                        long oldSequence = cursor2.GetLong(1);
                        string[] args = new string[] { Sharpen.Extensions.ToString(GetViewId()),
                            System.Convert.ToString(oldSequence) };
                        database.GetDatabase().ExecSQL("DELETE FROM maps WHERE view_id=? AND sequence=?", args);
                        if (RevisionInternal.CBLCompareRevIDs(oldRevId, revId) > 0)
                        {
                            // It still 'wins' the conflict, so it's the one that
                            // should be mapped [again], not the current revision!
                            revId = oldRevId;
                            sequence = oldSequence;
                            string[] selectArgs3 = new string[] { System.Convert.ToString(sequence) };
                            json = Utils.ByteArrayResultForQuery(database.GetDatabase(),
                                "SELECT json FROM revs WHERE sequence=?", selectArgs3);
                        }
                    }
                    // Fixed: close the per-document cursor as soon as we're done with it
                    // (it was previously leaked on every conflicting document).
                    cursor2.Close();
                    cursor2 = null;
                }
                // Get the document properties, to pass to the map function:
                EnumSet<Database.TDContentOptions> contentOptions = EnumSet.NoneOf<Database.TDContentOptions>();
                if (noAttachments)
                {
                    contentOptions.AddItem(Database.TDContentOptions.TDNoAttachments);
                }
                IDictionary<string, object> properties = database.DocumentPropertiesFromJSON(
                    json, docId, revId, false, sequence, contentOptions);
                if (properties != null)
                {
                    // Call the user-defined map() to emit new key/value pairs
                    // from this revision:
                    emitBlock.SetSequence(sequence);
                    mapBlock.Map(properties, emitBlock);
                }
            }
            else
            {
                // Fixed: without advancing here, a skipped "_design/" doc whose next
                // cursor row shares the same doc_id left keepGoing and the cursor
                // untouched, spinning this loop forever. (The parallel port of this
                // method advances in exactly this branch.)
                keepGoing = cursor.MoveToNext();
            }
        }
        // Finally, record the last revision sequence number that was indexed:
        ContentValues updateValues = new ContentValues();
        updateValues.Put("lastSequence", dbMaxSequence);
        string[] whereArgs_1 = new string[] { Sharpen.Extensions.ToString(GetViewId()) };
        database.GetDatabase().Update("views", updateValues, "view_id=?", whereArgs_1);
        // FIXME actually count number added :)
        Log.V(Log.TagView, "Finished re-indexing view: %s " + " up to sequence %s" + " (deleted %s added ?)",
            name, dbMaxSequence, deleted);
        result.SetCode(Status.Ok);
    }
    catch (SQLException e)
    {
        throw new CouchbaseLiteException(e, new Status(Status.DbError));
    }
    finally
    {
        if (cursor2 != null)
        {
            cursor2.Close();
        }
        if (cursor != null)
        {
            cursor.Close();
        }
        if (!result.IsSuccessful())
        {
            Log.W(Log.TagView, "Failed to rebuild view %s. Result code: %d", name, result.GetCode());
        }
        if (database != null)
        {
            database.EndTransaction(result.IsSuccessful());
        }
    }
}