// Verifies that commit points which cannot be used for recovery (because they reference
// files missing from disk) are deleted, and that recovery falls back to an older, valid one.
public void ShouldDeleteCommitPointIfCouldNotRecoverFromIt()
{
    var dataDir = NewDataPath("ShouldDeleteCommitPointIfCouldNotRecoverFromIt");
    string indexFullPath;
    string commitPointsDirectory;
    var index = new MapRecoveryTestIndex();

    using (var server = GetNewServer(runInMemory: false, dataDirectory: dataDir))
    {
        CommitPointAfterEachCommit(server.SystemDatabase.Configuration);

        using (var store = new DocumentStore { Url = "http://localhost:8079" }.Initialize())
        {
            index.Execute(store);

            using (var session = store.OpenSession())
            {
                session.Store(new Recovery { Name = "One", Number = 1 });
                session.SaveChanges(); // first commit point
                WaitForIndexing(store);

                session.Store(new Recovery { Name = "Two", Number = 2 });
                session.SaveChanges(); // second commit point
                WaitForIndexing(store);

                session.Store(new Recovery { Name = "Three", Number = 3 });
                session.SaveChanges(); // third commit point
                WaitForIndexing(store, timeout: TimeSpan.FromSeconds(60));
            }
        }

        Index indexInstance = server.SystemDatabase.IndexStorage.GetIndexInstance(index.IndexName);
        commitPointsDirectory = Path.Combine(server.SystemDatabase.Configuration.IndexStoragePath,
            indexInstance.IndexId.ToString(CultureInfo.InvariantCulture), "CommitPoints");
        indexFullPath = Path.Combine(server.SystemDatabase.Configuration.IndexStoragePath,
            indexInstance.IndexId.ToString(CultureInfo.InvariantCulture));
    }

    // make sure that there are 3 commit points
    var directories = Directory.GetDirectories(commitPointsDirectory);
    Assert.Equal(3, directories.Length);

    // mess "index.CommitPoint" in the SECOND and THIRD commit points by adding a referenced
    // file that does not exist on disk, making recovery from either of them impossible
    CorruptCommitPoint(directories[1]);
    CorruptCommitPoint(directories[2]);

    IndexMessing.MessSegmentsFile(indexFullPath);

    using (GetNewServer(runInMemory: false, dataDirectory: dataDir)) // do not delete previous directory
    {
        using (var store = new DocumentStore { Url = "http://localhost:8079" }.Initialize())
        {
            using (var session = store.OpenSession())
            {
                var result = session.Query<Recovery, MapRecoveryTestIndex>()
                    .Customize(x => x.WaitForNonStaleResults())
                    .ToList();
                Assert.Equal(3, result.Count);
            }
        }
    }

    // there should be exactly 2 commit points:
    // the first one which we used to recover
    // and the second one created because of indexing after recovery
    Assert.Equal(2, Directory.GetDirectories(commitPointsDirectory).Length);
}

// Rewrites "index.CommitPoint" in the given commit-point directory so it references a
// file that does not exist on disk, which makes that commit point unusable for recovery.
private static void CorruptCommitPoint(string commitPointDirectory)
{
    using (var commitPointFile = File.Open(Path.Combine(commitPointDirectory, "index.CommitPoint"), FileMode.Open))
    {
        var jsonSerializer = new JsonSerializer();
        var textReader = new JsonTextReader(new StreamReader(commitPointFile));
        var indexCommit = jsonSerializer.Deserialize<IndexCommitPoint>(textReader);
        indexCommit.SegmentsInfo.ReferencedFiles.Add("file-that-doesnt-exist");

        // rewrite from the beginning; truncate first so no stale bytes can survive
        // (SetLength(0) also clamps Position back to 0)
        commitPointFile.SetLength(0);
        using (var sw = new StreamWriter(commitPointFile))
        {
            var textWriter = new JsonTextWriter(sw);
            jsonSerializer.Serialize(textWriter, indexCommit);
            sw.Flush();
        }
    }
}
// Verifies that a map index with a corrupted segments file is recovered from its last
// commit point, and that documents missing from that commit point are re-indexed.
public void ShouldRecoverMapIndexFromLastCommitPoint()
{
    var dataDirectory = NewDataPath("RecoverMapIndex");
    string indexDirectory;
    string commitPointsPath;
    var index = new MapRecoveryTestIndex();

    using (var server = GetNewServer(runInMemory: false, dataDirectory: dataDirectory))
    {
        // only the very first commit produces a commit point
        CommitPointAfterFirstCommitOnly(server.SystemDatabase.Configuration);

        using (var store = new DocumentStore { Url = "http://localhost:8079" }.Initialize())
        {
            index.Execute(store);

            using (var session = store.OpenSession())
            {
                // indexing this entity creates the commit point
                session.Store(new Recovery { Name = "One", Number = 1 });
                session.SaveChanges();
                WaitForIndexing(store);

                // this entity is indexed but absent from the commit point
                session.Store(new Recovery { Name = "Two", Number = 2 });
                session.SaveChanges();
                WaitForIndexing(store);
            }
        }

        Index indexInstance = server.SystemDatabase.IndexStorage.GetIndexInstance(index.IndexName);
        var indexStoragePath = server.SystemDatabase.Configuration.IndexStoragePath;
        commitPointsPath = Path.Combine(indexStoragePath, indexInstance.IndexId + "\\CommitPoints");
        indexDirectory = Path.Combine(indexStoragePath, indexInstance.IndexId.ToString(CultureInfo.InvariantCulture));
    }

    // exactly one commit point exists, and it does not contain the second entity
    Assert.Equal(1, Directory.GetDirectories(commitPointsPath).Length);

    IndexMessing.MessSegmentsFile(indexDirectory);

    using (GetNewServer(runInMemory: false, dataDirectory: dataDirectory)) // do not delete previous directory
    {
        using (var store = new DocumentStore { Url = "http://localhost:8079" }.Initialize())
        {
            using (var session = store.OpenSession())
            {
                var recovered = session.Query<Recovery, MapRecoveryTestIndex>()
                    .Customize(x => x.WaitForNonStaleResults())
                    .ToList();
                Assert.Equal(2, recovered.Count);
            }
        }

        // a second commit point appears because the document missing after restore was indexed again
        Assert.Equal(2, Directory.GetDirectories(commitPointsPath).Length);
    }
}
// Verifies that a delete performed after the commit point was taken is replayed
// during recovery, so the recovered index does not resurrect the deleted document.
public void ShouldRecoverDeletes()
{
    var dataDirectory = NewDataPath("ShouldRecoverDeletes");
    string indexDirectory;
    string commitPointsPath;
    var index = new MapRecoveryTestIndex();

    using (var server = GetNewServer(runInMemory: false, dataDirectory: dataDirectory))
    {
        CommitPointAfterFirstCommitOnly(server.SystemDatabase.Configuration);

        using (var store = new DocumentStore { Url = "http://localhost:8079" }.Initialize())
        {
            index.Execute(store);

            using (var session = store.OpenSession())
            {
                // all three items end up in the single commit point
                session.Store(new Recovery { Name = "One", Number = 1 });
                session.Store(new Recovery { Name = "Two", Number = 2 });
                session.Store(new Recovery { Name = "Three", Number = 3 });
                session.SaveChanges();
                WaitForIndexing(store);

                // the delete happens after the commit point was created
                var itemToDelete = session.Query<Recovery, MapRecoveryTestIndex>().First();
                session.Delete(itemToDelete);
                session.SaveChanges();
                WaitForIndexing(store);
            }
        }

        Index indexInstance = server.SystemDatabase.IndexStorage.GetIndexInstance(index.IndexName);
        var indexStoragePath = server.SystemDatabase.Configuration.IndexStoragePath;
        commitPointsPath = Path.Combine(indexStoragePath, indexInstance.IndexId + "\\CommitPoints");
        indexDirectory = Path.Combine(indexStoragePath, indexInstance.IndexId.ToString(CultureInfo.InvariantCulture));
    }

    // only one commit point was created
    Assert.Equal(1, Directory.GetDirectories(commitPointsPath).Length);

    IndexMessing.MessSegmentsFile(indexDirectory);

    using (GetNewServer(runInMemory: false, dataDirectory: dataDirectory)) // do not delete previous directory
    {
        using (var store = new DocumentStore { Url = "http://localhost:8079" }.Initialize())
        {
            using (var session = store.OpenSession())
            {
                var remaining = session.Query<Recovery, MapRecoveryTestIndex>().ToArray();
                Assert.Equal(2, remaining.Length);
            }
        }
    }
}
// Verifies that a map/reduce index with a corrupted segments file is fully regenerated,
// producing correct reduce results for both single-step and multi-step reduce groups.
public void ShouldRegenerateMapReduceIndex()
{
    var index = new MapReduceRecoveryTestIndex();
    string indexDirectory;

    using (var server = GetNewServer(runInMemory: false, dataDirectory: DataDir))
    {
        indexDirectory = Path.Combine(server.Database.Configuration.IndexStoragePath,
            MonoHttpUtility.UrlEncode(index.IndexName));

        using (var store = new DocumentStore { Url = "http://localhost:8079" }.Initialize())
        {
            index.Execute(store);

            using (var session = store.OpenSession())
            {
                // small groups: reduced in a single step
                for (var i = 0; i < 5; i++)
                {
                    session.Store(new Recovery { Name = "One", Number = 1 });
                    session.Store(new Recovery { Name = "Two", Number = 2 });
                }

                // large groups: reduced in multiple steps
                for (var i = 0; i < 100; i++)
                {
                    session.Store(new Recovery { Name = "Three", Number = 3 });
                    session.Store(new Recovery { Name = "Four", Number = 4 });
                }

                session.SaveChanges();
            }

            WaitForIndexing(store);
        }
    }

    IndexMessing.MessSegmentsFile(indexDirectory);

    using (GetNewServer(runInMemory: false, dataDirectory: DataDir, deleteDirectory: false)) // do not delete previous directory
    {
        using (var store = new DocumentStore { Url = "http://localhost:8079" }.Initialize())
        {
            using (var session = store.OpenSession())
            {
                var results = session.Query<Recovery, MapReduceRecoveryTestIndex>()
                    .Customize(x => x.WaitForNonStaleResults())
                    .OrderBy(x => x.Number)
                    .ToList();

                Assert.Equal(4, results.Count);
                Assert.Equal("One", results[0].Name);
                Assert.Equal(5, results[0].Number);
                Assert.Equal("Two", results[1].Name);
                Assert.Equal(10, results[1].Number);
                Assert.Equal("Three", results[2].Name);
                Assert.Equal(300, results[2].Number);
                Assert.Equal("Four", results[3].Name);
                Assert.Equal(400, results[3].Number);
            }
        }
    }
}