private ClusterManager.RepairKit KillIncrementally(ClusterManager.ManagedCluster cluster, HighlyAvailableGraphDatabase failed1, HighlyAvailableGraphDatabase failed2, HighlyAvailableGraphDatabase failed3)
{
    // Fail the instances one at a time, waiting for each to be evicted before failing the next.
    // Only the repair kit for the first failure is returned; the later failures are not repaired here.
    ClusterManager.RepairKit firstFailure = cluster.Fail(failed1);
    cluster.Await(instanceEvicted(failed1));
    cluster.Fail(failed2);
    cluster.Await(instanceEvicted(failed2));
    cluster.Fail(failed3);
    cluster.Await(instanceEvicted(failed3));
    return firstFailure;
}
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in C#:
//ORIGINAL LINE: private void testFailOver(int clusterSize) throws Throwable
private void TestFailOver(int clusterSize)
{
    // given
    ClusterManager clusterManager = (new ClusterManager.Builder())
        .withRootDirectory(Dir.cleanDirectory("failover"))
        .withCluster(ClusterManager.clusterOfSize(clusterSize))
        .build();
    clusterManager.Start();
    ClusterManager.ManagedCluster cluster = clusterManager.Cluster;
    cluster.Await(ClusterManager.allSeesAllAsAvailable());
    HighlyAvailableGraphDatabase oldMaster = cluster.Master;

    // When
    long start = System.nanoTime();
    ClusterManager.RepairKit repairKit = cluster.Fail(oldMaster);
    Logger.Logger.warning("Shut down master");

    // Then
    cluster.Await(ClusterManager.masterAvailable(oldMaster));
    long end = System.nanoTime();

    Logger.Logger.warning("Failover took:" + (end - start) / 1000000 + "ms");

    repairKit.Repair();
    Thread.Sleep(3000); // give repaired instance a chance to cleanly rejoin and exit faster

    clusterManager.SafeShutdown();
}
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in C#:
//ORIGINAL LINE: private void testFailoverWithAdditionalSlave(int clusterSize, int[] slaveIndexes) throws Throwable
private void TestFailoverWithAdditionalSlave(int clusterSize, int[] slaveIndexes)
{
    File root = Dir.cleanDirectory("testcluster_" + Name.MethodName);
    ClusterManager manager = (new ClusterManager.Builder())
        .withRootDirectory(root)
        .withCluster(ClusterManager.clusterOfSize(clusterSize))
        .build();

    try
    {
        manager.Start();
        ClusterManager.ManagedCluster cluster = manager.Cluster;

        cluster.Await(allSeesAllAsAvailable());
        cluster.Await(masterAvailable());

        ICollection<HighlyAvailableGraphDatabase> failed = new List<HighlyAvailableGraphDatabase>();
        ICollection<ClusterManager.RepairKit> repairKits = new List<ClusterManager.RepairKit>();

        foreach (int slaveIndex in slaveIndexes)
        {
            HighlyAvailableGraphDatabase nthSlave = GetNthSlave(cluster, slaveIndex);
            failed.Add(nthSlave);
            ClusterManager.RepairKit repairKit = cluster.Fail(nthSlave);
            repairKits.Add(repairKit);
        }

        HighlyAvailableGraphDatabase oldMaster = cluster.Master;
        failed.Add(oldMaster);
        repairKits.Add(cluster.Fail(oldMaster));

        cluster.Await(masterAvailable(ToArray(failed)));

        foreach (ClusterManager.RepairKit repairKit in repairKits)
        {
            repairKit.Repair();
        }
        Thread.Sleep(3000); // give repaired instances a chance to cleanly rejoin and exit faster
    }
    finally
    {
        manager.SafeShutdown();
    }
}
//JAVA TO C# CONVERTER TODO TASK: Most Java annotations will not have direct .NET equivalent attributes:
//ORIGINAL LINE: @Test public void shouldContinueServingBoltRequestsBetweenInternalRestarts() throws Throwable
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in C#:
public virtual void ShouldContinueServingBoltRequestsBetweenInternalRestarts()
{
    // given
    /*
     * Interestingly, it is enough to simply start a slave and then direct sessions to it. The problem seems
     * to arise immediately, since going from startup to SLAVE involves at least one internal restart,
     * and that seems sufficient to break the bolt server.
     * However, that would make the test really weird, so we'll start the cluster, make sure we can connect,
     * then isolate the slave so it shuts down internally, then have it rejoin and switch back to slave.
     * At the end of this process, it must still be possible to open and execute transactions against the instance.
     */
    ClusterManager.ManagedCluster cluster = ClusterRule.startCluster();
    HighlyAvailableGraphDatabase slave1 = cluster.AnySlave;

    Driver driver = GraphDatabase.driver(cluster.GetBoltAddress(slave1), AuthTokens.basic("neo4j", "neo4j"));

    /*
     * We'll use a bookmark to enforce use of kernel internals by the bolt server, to make sure that parts that are
     * switched during an internal restart are actually refreshed. Technically, this is not necessary, since the
     * bolt server makes such use for every request. But this puts a nice bow on top of it.
     */
    string lastBookmark = InExpirableSession(driver, Driver.session, s =>
    {
        using (Transaction tx = s.beginTransaction())
        {
            tx.run("CREATE (person:Person {name: {name}, title: {title}})", parameters("name", "Webber", "title", "Mr"));
            tx.success();
        }
        return s.lastBookmark();
    });

    // when
    ClusterManager.RepairKit slaveFailRK = cluster.Fail(slave1);

    cluster.Await(entireClusterSeesMemberAsNotAvailable(slave1));
    slaveFailRK.Repair();

    cluster.Await(masterSeesMembers(3));

    // then
    int? count = InExpirableSession(driver, Driver.session, s =>
    {
        Record record;
        using (Transaction tx = s.beginTransaction(lastBookmark))
        {
            record = tx.run("MATCH (n:Person) RETURN COUNT(*) AS count").next();
            tx.success();
        }
        return record.get("count").asInt();
    });

    assertEquals(1, count.Value);
}
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in C#:
//ORIGINAL LINE: private void reelectTheSameMasterMakingItGoToPendingAndBack(org.neo4j.kernel.impl.ha.ClusterManager.ManagedCluster cluster) throws Throwable
private void ReelectTheSameMasterMakingItGoToPendingAndBack(ClusterManager.ManagedCluster cluster)
{
    HighlyAvailableGraphDatabase master = cluster.Master;

    // Fail the master and wait for it to go to pending, since it detects it's partitioned away
    ClusterManager.RepairKit masterRepair = cluster.Fail(master, false, ClusterManager.NetworkFlag.IN, ClusterManager.NetworkFlag.OUT);
    cluster.Await(memberThinksItIsRole(master, UNKNOWN));

    // Then immediately repair
    masterRepair.Repair();

    // Wait for this instance to become master again, since the other instances are slave only
    cluster.Await(memberThinksItIsRole(master, MASTER));
    cluster.Await(ClusterManager.masterAvailable());
    assertEquals(master, cluster.Master);
}
/// <summary>
/// Main difference to <seealso cref="shouldCopyStoreFromMasterIfBranched()"/> is that no instances are shut down
/// during the course of the test. This is to test the functionality of some internal components being restarted.
/// </summary>
//JAVA TO C# CONVERTER TODO TASK: Most Java annotations will not have direct .NET equivalent attributes:
//ORIGINAL LINE: @SuppressWarnings("unchecked") @Test public void shouldCopyStoreFromMasterIfBranchedInLiveScenario() throws Throwable
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in C#:
public virtual void ShouldCopyStoreFromMasterIfBranchedInLiveScenario()
{
    // GIVEN a cluster of 3, all having the same data (node A)
    // thor is whoever is the master to begin with
    // odin is whoever is picked as _the_ slave given thor as initial master
    File storeDirectory = _directory.directory();
    ClusterManager clusterManager = _life.add(new ClusterManager.Builder(storeDirectory)
        .withSharedConfig(stringMap(HaSettings.tx_push_factor.name(), "0", HaSettings.pull_interval.name(), "0"))
        .build());
    ClusterManager.ManagedCluster cluster = clusterManager.Cluster;
    cluster.Await(allSeesAllAsAvailable());
    HighlyAvailableGraphDatabase thor = cluster.Master;
    string indexName = "valhalla";
    CreateNode(thor, "A", AndIndexInto(indexName));
    cluster.Sync();

    // WHEN creating a node B1 on thor (note the disabled cluster transaction propagation)
    CreateNode(thor, "B1", AndIndexInto(indexName));
    // and right after that failing the master so that it falls out of the cluster
    HighlyAvailableGraphDatabase odin = cluster.AnySlave;
    cluster.Info(format("%n ==== TAMPERING WITH " + thor + "'s CABLES ====%n"));
    ClusterManager.RepairKit thorRepairKit = cluster.Fail(thor);

    // try to create a transaction on odin until it succeeds
    cluster.Await(ClusterManager.masterAvailable(thor));
    cluster.Await(ClusterManager.memberThinksItIsRole(odin, HighAvailabilityModeSwitcher.MASTER));
    assertTrue(odin.Master);
    RetryOnTransactionFailure(odin, db => createNode(db, "B2", AndIndexInto(indexName)));

    // perform transactions so that index files change under the hood
    ISet<File> odinLuceneFilesBefore = Iterables.asSet(GatherLuceneFiles(odin, indexName));
    for (char prefix = 'C'; !Changed(odinLuceneFilesBefore, Iterables.asSet(GatherLuceneFiles(odin, indexName))); prefix++)
    {
        char fixedPrefix = prefix;
        RetryOnTransactionFailure(odin, db => createNodes(odin, fixedPrefix.ToString(), 10_000, AndIndexInto(indexName)));
        cluster.Force(); // Force will most likely cause lucene explicit indexes to commit and change file structure
    }

    // so anyways, when thor comes back into the cluster
    cluster.Info(format("%n ==== REPAIRING CABLES ====%n"));
    cluster.Await(memberThinksItIsRole(thor, UNKNOWN));
    BranchMonitor thorHasBranched = InstallBranchedDataMonitor(cluster.GetMonitorsByDatabase(thor));
    thorRepairKit.Repair();
    cluster.Await(memberThinksItIsRole(thor, SLAVE));
    cluster.Await(memberThinksItIsRole(odin, MASTER));
    cluster.Await(allSeesAllAsAvailable());
    assertFalse(thor.Master);
    assertTrue("No store-copy performed", thorHasBranched.CopyCompleted);
    assertTrue("Store-copy unsuccessful", thorHasBranched.CopySuccessful);

    // Now do some more transactions on the current master (odin) and have thor pull those
    for (int i = 0; i < 3; i++)
    {
        int ii = i;
        RetryOnTransactionFailure(odin, db => createNodes(odin, "" + ii, 10, AndIndexInto(indexName)));
        cluster.Sync();
        cluster.Force();
    }

    // THEN thor should be a slave, having copied a store from master and good to go
    assertFalse(HasNode(thor, "B1"));
    assertTrue(HasNode(thor, "B2"));
    assertTrue(HasNode(thor, "C-0"));
    assertTrue(HasNode(thor, "0-0"));
    assertTrue(HasNode(odin, "0-0"));
}