public AtomicAction ActionToChangeEncryptionKey(SymmetricKey newKey)
{
    var action = new AtomicAction();

    // Find all the blob files:
    var blobs = default(string[]);
    var oldKey = EncryptionKey;
    blobs = Directory.GetFiles(_path, "*" + FileExtension);
    if (blobs.Length == 0) {
        // No blobs, so nothing to encrypt. Just add/remove the encryption marker file:
        action.AddLogic(() =>
        {
            Log.To.NoDomain.D(TAG, "{0} {1}", (newKey != null) ? "encrypting" : "decrypting", _path);
            Log.To.NoDomain.D(TAG, " No blobs to copy; done.");
            EncryptionKey = newKey;
            MarkEncrypted(newKey != null);
        }, () =>
        {
            EncryptionKey = oldKey;
            MarkEncrypted(oldKey != null);
        }, null);
        return action;
    }

    // Create a new directory for the new blob store. Have to do this now, before starting the
    // action, because farther down we create an action to move it...
    var tempPath = Path.Combine(Path.GetTempPath(), String.Format("CouchbaseLite-Temp-{0}", Misc.CreateGUID()));
    action.AddLogic(() =>
    {
        Log.To.NoDomain.D(TAG, "{0} {1}", (newKey != null) ? "encrypting" : "decrypting", _path);
        Directory.CreateDirectory(tempPath);
    }, () => Directory.Delete(tempPath, true), null);

    var tempStore = default(BlobStore);
    action.AddLogic(() =>
    {
        tempStore = new BlobStore(tempPath, newKey);
        tempStore.MarkEncrypted(true);
    }, null, null);

    // Copy each of my blobs into the new store (which will update its encryption):
    action.AddLogic(() =>
    {
        foreach (var blobName in blobs) {
            // Copy file by reading with old key and writing with new one:
            Log.To.NoDomain.D(TAG, " Copying {0}", blobName);
            Stream readStream = File.Open(blobName, FileMode.Open, FileAccess.Read, FileShare.Read);
            if (EncryptionKey != null) {
                readStream = EncryptionKey.DecryptStream(readStream);
            }

            var writer = new BlobStoreWriter(tempStore);
            try {
                writer.Read(readStream);
                writer.Finish();
                writer.Install();
            } catch (Exception) {
                writer.Cancel();
                throw;
            } finally {
                readStream.Dispose();
            }
        }
    }, null, null);

    // Replace the attachment dir with the new one:
    action.AddLogic(AtomicAction.MoveDirectory(tempPath, _path));

    // Finally update EncryptionKey:
    action.AddLogic(() =>
    {
        EncryptionKey = newKey;
    }, () =>
    {
        EncryptionKey = oldKey;
    }, null);

    return action;
}
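// Illustrative usage sketch, not part of the original source: it assumes AtomicAction exposes a
// Run() method that executes the queued steps in order and backs out completed steps on failure
// (an assumption about its API). The blobStore and newKey names are placeholders.
var rekeyAction = blobStore.ActionToChangeEncryptionKey(newKey);
rekeyAction.Run();  // assumed entry point that performs each step added via AddLogic()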
private string GetFilename(SecureStorageRequest request)
{
    var filenameBytes = Encoding.UTF8.GetBytes($"{request.Account}");
    return Path.Combine(GetFoldername(request), $"{Misc.HexSHA1Digest(filenameBytes)}.bin");
}
// This is used by the listener
internal Replication ReplicationWithProperties(IDictionary<string, object> properties)
{
    // Extract the parameters from the JSON request body:
    // http://wiki.apache.org/couchdb/Replication
    bool push, createTarget;
    var results = new Dictionary<string, object>() {
        { "database", null },
        { "remote", null },
        { "headers", null },
        { "authorizer", null }
    };

    Status result = ParseReplicationProperties(properties, out push, out createTarget, results);
    if (result.IsError) {
        throw Misc.CreateExceptionAndLog(Log.To.Listener, result.Code, TAG, "Failed to create replication");
    }

    object continuousObj = properties.Get("continuous");
    bool continuous = false;
    if (continuousObj is bool) {
        continuous = (bool)continuousObj;
    }

    var scheduler = new SingleTaskThreadpoolScheduler();
    Replication rep = null;
    if (push) {
        rep = new Pusher((Database)results["database"], (Uri)results["remote"], continuous, new TaskFactory(scheduler));
    } else {
        rep = new Puller((Database)results["database"], (Uri)results["remote"], continuous, new TaskFactory(scheduler));
    }

    rep.Filter = properties.Get("filter") as string;
    rep.FilterParams = properties.Get("query_params").AsDictionary<string, object>();
    rep.DocIds = properties.Get("doc_ids").AsList<string>();
    rep.Headers = new Dictionary<string, string>();
    rep.ReplicationOptions = new ReplicationOptions(properties);

    // Pull cookies out of the incoming headers and set them on the replication; every other
    // header is passed through as-is:
    foreach (var header in results.Get("headers").AsDictionary<string, string>()) {
        if (header.Key.ToLowerInvariant() == "cookie") {
            var cookie = default(Cookie);
            if (CookieParser.TryParse(header.Value, ((Uri)results["remote"]).Host, out cookie)) {
                try {
                    rep.SetCookie(cookie.Name, cookie.Value, cookie.Path, cookie.Expires, cookie.Secure, cookie.HttpOnly);
                } catch (CookieException e) {
                    var headerValue = new SecureLogString(header.Value, LogMessageSensitivity.Insecure);
                    Log.To.Listener.W(TAG, $"Invalid cookie string received ({headerValue}), throwing...", e);
                    throw new CouchbaseLiteException(StatusCode.BadRequest);
                }
            } else {
                var headerValue = new SecureLogString(header.Value, LogMessageSensitivity.Insecure);
                Log.To.Listener.W(TAG, $"Invalid cookie string received ({headerValue}), throwing...");
                throw new CouchbaseLiteException(StatusCode.BadRequest);
            }
        } else {
            rep.Headers.Add(header.Key, header.Value);
        }
    }

    rep.Authenticator = results.Get("authorizer") as IAuthenticator;
    if (push) {
        ((Pusher)rep).CreateTarget = createTarget;
    }

    var db = (Database)results["database"];

    // If this is a duplicate, reuse an existing replicator:
    var activeReplicators = default(IList<Replication>);
    var existing = default(Replication);
    if (db.ActiveReplicators.AcquireTemp(out activeReplicators)) {
        existing = activeReplicators.FirstOrDefault(x => x.LocalDatabase == rep.LocalDatabase &&
            x.RemoteUrl == rep.RemoteUrl && x.IsPull == rep.IsPull &&
            x.RemoteCheckpointDocID().Equals(rep.RemoteCheckpointDocID()));
    }

    return existing ?? rep;
}
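// Illustrative sketch, not part of the original source, of the kind of properties dictionary this
// method expects; the keys mirror the CouchDB-style replication body parsed above, and the URL,
// database name, filter name, and header values are placeholders.
var replicationProperties = new Dictionary<string, object> {
    { "source", "local-db-name" },
    { "target", "http://example.com:4984/remote-db" },
    { "continuous", true },
    { "filter", "myddoc/myfilter" },                                            // optional
    { "query_params", new Dictionary<string, object> { { "tag", "sync" } } },   // optional
    { "headers", new Dictionary<string, object> { { "X-Custom", "value" } } }   // optional
};
// Assuming this is invoked on the owning Manager instance:
var replication = manager.ReplicationWithProperties(replicationProperties);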
public void StopIncrementalParse()
{
    _textReader?.Close();
    Misc.SafeDispose(ref _textReader);
}
/// <summary>
/// Initializes a Manager that stores Databases in the given directory.
/// </summary>
/// <param name="directoryFile"><see cref="System.IO.DirectoryInfo"/> object for initializing the Manager object.</param>
/// <param name="options">Option flags for initialization.</param>
/// <exception cref="T:System.IO.DirectoryNotFoundException">Thrown when there is an error while accessing or creating the given directory.</exception>
public Manager(DirectoryInfo directoryFile, ManagerOptions options)
{
    if (directoryFile == null) {
        Log.To.Database.E(TAG, "directoryFile cannot be null in ctor, throwing...");
        throw new ArgumentNullException("directoryFile");
    }

    this.directoryFile = directoryFile;
    Options = options ?? DefaultOptions;
    this.databases = new Dictionary<string, Database>();
    this.replications = new List<Replication>();
    Shared = new SharedState();

    // Create the directory, but don't fail if it already exists:
    if (!directoryFile.Exists) {
        directoryFile.Create();
        directoryFile.Refresh();
        if (!directoryFile.Exists) {
            throw Misc.CreateExceptionAndLog(Log.To.Database, StatusCode.InternalServerError, TAG,
                "Unable to create directory {0}", directoryFile);
        }
    }

    UpgradeOldDatabaseFiles(directoryFile);

#if __IOS__
    Foundation.NSString protection;
    switch (Options.FileProtection & Foundation.NSDataWritingOptions.FileProtectionMask) {
        case Foundation.NSDataWritingOptions.FileProtectionNone:
            protection = Foundation.NSFileManager.FileProtectionNone;
            break;
        case Foundation.NSDataWritingOptions.FileProtectionComplete:
            protection = Foundation.NSFileManager.FileProtectionComplete;
            break;
        case Foundation.NSDataWritingOptions.FileProtectionCompleteUntilFirstUserAuthentication:
            protection = Foundation.NSFileManager.FileProtectionCompleteUntilFirstUserAuthentication;
            break;
        default:
            protection = Foundation.NSFileManager.FileProtectionCompleteUnlessOpen;
            break;
    }

    var attributes = new Foundation.NSDictionary(Foundation.NSFileManager.FileProtectionKey, protection);
    Foundation.NSError error;
    Foundation.NSFileManager.DefaultManager.SetAttributes(attributes, directoryFile.FullName, out error);
#endif

    // Use Options (never null) rather than the raw options argument, which may be null:
    var scheduler = Options.CallbackScheduler;
    CapturedContext = new TaskFactory(scheduler);
    Log.To.TaskScheduling.I(TAG, "Callbacks will be scheduled on {0}", scheduler);
    workExecutor = new TaskFactory(new SingleTaskThreadpoolScheduler());
    _networkReachabilityManager = new NetworkReachabilityManager();
    _networkReachabilityManager.StartListening();
    StorageType = "SQLite";
    Log.To.Database.I(TAG, "Created {0}", this);
}
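// Illustrative usage sketch, not part of the original source: creating a Manager over an
// application data folder. The path is a placeholder, and passing null options falls back to
// DefaultOptions, as handled in the constructor above.
var dbDirectory = new DirectoryInfo(Path.Combine(
    Environment.GetFolderPath(Environment.SpecialFolder.LocalApplicationData), "cblite"));
var manager = new Manager(dbDirectory, null);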
// This is used by the listener
internal Replication ReplicationWithProperties(IDictionary<string, object> properties)
{
    // Extract the parameters from the JSON request body:
    // http://wiki.apache.org/couchdb/Replication
    bool push, createTarget;
    var results = new Dictionary<string, object>() {
        { "database", null },
        { "remote", null },
        { "headers", null },
        { "authorizer", null }
    };

    Status result = ParseReplicationProperties(properties, out push, out createTarget, results);
    if (result.IsError) {
        throw Misc.CreateExceptionAndLog(Log.To.Listener, result.Code, TAG, "Failed to create replication");
    }

    object continuousObj = properties.Get("continuous");
    bool continuous = false;
    if (continuousObj is bool) {
        continuous = (bool)continuousObj;
    }

    var scheduler = new SingleTaskThreadpoolScheduler();
    Replication rep = null;
    if (push) {
        rep = new Pusher((Database)results["database"], (Uri)results["remote"], continuous, new TaskFactory(scheduler));
    } else {
        rep = new Puller((Database)results["database"], (Uri)results["remote"], continuous, new TaskFactory(scheduler));
    }

    rep.Filter = properties.Get("filter") as string;
    rep.FilterParams = properties.Get("query_params") as IDictionary<string, object>;
    rep.DocIds = properties.Get("doc_ids") as IEnumerable<string>;
    rep.RequestHeaders = results.Get("headers") as IDictionary<string, object>;
    rep.Authenticator = results.Get("authorizer") as IAuthenticator;
    if (push) {
        ((Pusher)rep).CreateTarget = createTarget;
    }

    var db = (Database)results["database"];

    // If this is a duplicate, reuse an existing replicator:
    var activeReplicators = default(IList<Replication>);
    var existing = default(Replication);
    if (db.ActiveReplicators.AcquireTemp(out activeReplicators)) {
        existing = activeReplicators.FirstOrDefault(x => x.LocalDatabase == rep.LocalDatabase &&
            x.RemoteUrl == rep.RemoteUrl && x.IsPull == rep.IsPull &&
            x.RemoteCheckpointDocID().Equals(rep.RemoteCheckpointDocID()));
    }

    return existing ?? rep;
}
public int Compare(RevisionInternal rev1, RevisionInternal rev2)
{
    return Misc.TDSequenceCompare(rev1.GetSequence(), rev2.GetSequence());
}
internal void UpdateIndex()
{
    Log.V(Database.Tag, "Re-indexing view {0} ...", Name);
    System.Diagnostics.Debug.Assert((Map != null));

    if (Id <= 0) {
        var msg = string.Format("View.Id <= 0");
        throw new CouchbaseLiteException(msg, new Status(StatusCode.NotFound));
    }

    Database.BeginTransaction();

    var result = new Status(StatusCode.InternalServerError);
    Cursor cursor = null;
    try {
        var lastSequence = LastSequenceIndexed;
        var dbMaxSequence = Database.LastSequenceNumber;
        if (lastSequence == dbMaxSequence) {
            // Nothing to do (eg, kCBLStatusNotModified)
            Log.V(Database.Tag, "lastSequence ({0}) == dbMaxSequence ({1}), nothing to do", lastSequence, dbMaxSequence);
            result.SetCode(StatusCode.NotModified);
            return;
        }

        // First remove obsolete emitted results from the 'maps' table:
        var sequence = lastSequence;
        if (lastSequence < 0) {
            var msg = string.Format("lastSequence < 0 ({0})", lastSequence);
            throw new CouchbaseLiteException(msg, new Status(StatusCode.InternalServerError));
        }

        if (lastSequence == 0) {
            // If the lastSequence has been reset to 0, make sure to remove any leftover rows:
            var whereArgs = new string[] { Id.ToString() };
            Database.StorageEngine.Delete("maps", "view_id=?", whereArgs);
        } else {
            // Delete all obsolete map results (ones from since-replaced revisions):
            var args = new [] { Id.ToString(), lastSequence.ToString(), lastSequence.ToString() };
            Database.StorageEngine.ExecSQL(
                "DELETE FROM maps WHERE view_id=? AND sequence IN ("
                + "SELECT parent FROM revs WHERE sequence>? "
                + "AND parent>0 AND parent<=?)", args);
        }

        var deleted = 0;
        cursor = Database.StorageEngine.RawQuery("SELECT changes()");
        cursor.MoveToNext();
        deleted = cursor.GetInt(0);
        cursor.Close();
        // TODO: Find a better way to propagate this back

        // Now scan every revision added since the last time the view was indexed:
        var selectArgs = new[] { lastSequence.ToString() };
        cursor = Database.StorageEngine.RawQuery(
            "SELECT revs.doc_id, sequence, docid, revid, json, no_attachments FROM revs, docs "
            + "WHERE sequence>? AND current!=0 AND deleted=0 "
            + "AND revs.doc_id = docs.doc_id "
            + "ORDER BY revs.doc_id, revid DESC", CommandBehavior.SequentialAccess, selectArgs);

        var lastDocID = 0L;
        var keepGoing = cursor.MoveToNext();
        while (keepGoing) {
            long docID = cursor.GetLong(0);
            if (docID != lastDocID) {
                // Only look at the first-iterated revision of any document, because this is the
                // one with the highest revid, hence the "winning" revision of a conflict.
                lastDocID = docID;

                // Reconstitute the document as a dictionary:
                sequence = cursor.GetLong(1);
                string docId = cursor.GetString(2);
                if (docId.StartsWith("_design/", StringComparison.InvariantCultureIgnoreCase)) {
                    // Design docs don't get indexed!
                    keepGoing = cursor.MoveToNext();
                    continue;
                }

                var revId = cursor.GetString(3);
                var json = cursor.GetBlob(4);
                var noAttachments = cursor.GetInt(5) > 0;

                // Skip rows with the same doc_id -- these are losing conflicts.
                while ((keepGoing = cursor.MoveToNext()) && cursor.GetLong(0) == docID) {
                }

                if (lastSequence > 0) {
                    // Find conflicts with documents from previous indexings.
                    var selectArgs2 = new[] { Convert.ToString(docID), Convert.ToString(lastSequence) };
                    var cursor2 = Database.StorageEngine.RawQuery(
                        "SELECT revid, sequence FROM revs "
                        + "WHERE doc_id=? AND sequence<=? AND current!=0 AND deleted=0 "
                        + "ORDER BY revID DESC "
                        + "LIMIT 1", selectArgs2);
                    if (cursor2.MoveToNext()) {
                        var oldRevId = cursor2.GetString(0);
                        // This is the revision that used to be the 'winner'.
                        // Remove its emitted rows:
                        var oldSequence = cursor2.GetLong(1);
                        var args = new[] { Sharpen.Extensions.ToString(Id), Convert.ToString(oldSequence) };
                        Database.StorageEngine.ExecSQL("DELETE FROM maps WHERE view_id=? AND sequence=?", args);
                        if (RevisionInternal.CBLCompareRevIDs(oldRevId, revId) > 0) {
                            // It still 'wins' the conflict, so it's the one that should be
                            // mapped [again], not the current revision!
                            revId = oldRevId;
                            sequence = oldSequence;
                            var selectArgs3 = new[] { Convert.ToString(sequence) };
                            json = Misc.ByteArrayResultForQuery(
                                Database.StorageEngine,
                                "SELECT json FROM revs WHERE sequence=?",
                                selectArgs3
                            );
                        }
                    }
                }

                // Get the document properties, to pass to the map function:
                var contentOptions = DocumentContentOptions.None;
                if (noAttachments) {
                    contentOptions |= DocumentContentOptions.NoAttachments;
                }

                var properties = Database.DocumentPropertiesFromJSON(
                    json, docId, revId, false, sequence, contentOptions
                );
                if (properties != null) {
                    // Call the user-defined map() to emit new key/value pairs from this revision.
                    // This is the emit() block, which gets called from within the user-defined
                    // map() block that's called down below.
                    var enclosingView = this;
                    var thisSequence = sequence;
                    var map = Map;
                    if (map == null) {
                        throw new CouchbaseLiteException("Map function is missing.");
                    }

                    EmitDelegate emitBlock = (key, value) =>
                    {
                        // TODO: Do we need to do any null checks on key or value?
                        try {
                            var keyJson = Manager.GetObjectMapper().WriteValueAsString(key);
                            var valueJson = value == null ? null : Manager.GetObjectMapper().WriteValueAsString(value);

                            var insertValues = new ContentValues();
                            insertValues.Put("view_id", enclosingView.Id);
                            insertValues["sequence"] = thisSequence;
                            insertValues["key"] = keyJson;
                            insertValues["value"] = valueJson;
                            enclosingView.Database.StorageEngine.Insert("maps", null, insertValues);

                            // According to issue #81, it is possible that another thread will be
                            // inserting a new revision into the database at the same time that
                            // the UpdateIndex operation is running. This should be guarded by the
                            // database transaction that the code began, but apparently it was not.
                            // As a result, it is possible that dbMaxSequence will be out of date
                            // at this point, which can cause the last indexed sequence to drift
                            // away from the obsolete map entry cleanup operation and eventually
                            // results in duplicated documents in the indexed map.
                            //
                            // To prevent the issue above, as a workaround, we need to make sure
                            // that the current max sequence of the indexed documents is kept up
                            // to date. This diverges from the CBL Android code, which doesn't
                            // have the same issue because Android doesn't allow multiple threads
                            // to interact with the database at the same time.
                            if (thisSequence > dbMaxSequence) {
                                dbMaxSequence = thisSequence;
                            }
                        } catch (Exception e) {
                            Log.E(Database.Tag, "Error emitting", e);
                        }
                    };

                    map(properties, emitBlock);
                }
            }
        }

        // Finally, record the last revision sequence number that was indexed:
        var updateValues = new ContentValues();
        updateValues["lastSequence"] = dbMaxSequence;
        var whereArgs_1 = new string[] { Id.ToString() };
        Database.StorageEngine.Update("views", updateValues, "view_id=?", whereArgs_1);

        // FIXME: actually count the number added
        Log.V(Database.Tag, "...Finished re-indexing view {0} up to sequence {1} (deleted {2} added ?)",
            Name, Convert.ToString(dbMaxSequence), deleted);
        result.SetCode(StatusCode.Ok);
    } catch (Exception e) {
        throw new CouchbaseLiteException(e, new Status(StatusCode.DbError));
    } finally {
        if (cursor != null) {
            cursor.Close();
        }

        if (!result.IsSuccessful) {
            Log.W(Database.Tag, "Failed to rebuild view {0}:{1}", Name, result.GetCode());
        }

        if (Database != null) {
            Database.EndTransaction(result.IsSuccessful);
        }
    }
}
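// Illustrative sketch, not part of the original source: UpdateIndex calls the view's Map delegate
// for every new revision and persists whatever it emits into the 'maps' table. With the Couchbase
// Lite 1.x view API, a map function is registered roughly like this; the view name, property
// name, and version string are placeholders.
var view = database.GetView("docs-by-type");
view.SetMap((doc, emit) =>
{
    object type;
    if (doc.TryGetValue("type", out type)) {
        emit(type, null);   // key/value pairs emitted here are what UpdateIndex inserts into 'maps'
    }
}, "1");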
/// <summary>
/// Creates a document given an ID
/// </summary>
/// <param name="id">The ID for the document</param>
public MutableDocument(string id)
    : this(null, id ?? Misc.CreateGuid(), null)
{
}
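// Illustrative usage sketch, not part of the original source: constructing a MutableDocument with
// an explicit ID versus relying on the null-coalescing fallback above to generate one.
var docWithId = new MutableDocument("user::alice");          // uses the supplied ID
var docWithGeneratedId = new MutableDocument((string)null);  // ID falls back to Misc.CreateGuid()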