public void CanReplicateSegmentWithDataWhileStillServingRequestForSegment()
{
    // Interleaves writes to segment 1 with replication pages to show the
    // segment keeps serving puts while it is being replicated away.
    using (var storage = new DistributedHashTableStorageClient(storageHost.Endpoint))
    {
        var topology = new DistributedHashTableMasterClient(masterUri).GetTopology();

        storage.Put(topology.Version, new ExtendedPutRequest
        {
            Bytes = new byte[] { 1, 2, 3 },
            Key = "test",
            Segment = 1,
        });

        var page = storage.ReplicateNextPage(NodeEndpoint.ForTest(13), ReplicationType.Ownership, 1);
        Assert.Equal("test", page.PutRequests[0].Key);

        // A put issued while replication is in flight must appear on the next page.
        storage.Put(topology.Version, new ExtendedPutRequest
        {
            Bytes = new byte[] { 1, 2, 3 },
            Key = "test2",
            Segment = 1,
        });

        page = storage.ReplicateNextPage(NodeEndpoint.ForTest(13), ReplicationType.Ownership, 1);
        Assert.Equal("test2", page.PutRequests[0].Key);
    }
}
public void CanRemoveItem()
{
    // Puts a value, removes it by its specific version, then verifies a get
    // for the same key returns no values.
    using (var storage = new DistributedHashTableStorageClient(storageHost.Endpoint))
    {
        var master = new DistributedHashTableMasterClient(masterUri);
        var topology = master.GetTopology();

        var putResults = storage.Put(topology.Version, new ExtendedPutRequest
        {
            Bytes = new byte[] { 1, 2, 3, 4 },
            Key = "test",
            Segment = 1,
        });
        Assert.False(putResults[0].ConflictExists);

        var removeResults = storage.Remove(topology.Version, new ExtendedRemoveRequest
        {
            Key = "test",
            SpecificVersion = putResults[0].Version,
            Segment = 1
        });
        Assert.True(removeResults[0]);

        var getResults = storage.Get(topology.Version, new ExtendedGetRequest
        {
            Key = "test",
            Segment = 1
        });
        Assert.Equal(0, getResults[0].Length);
    }
}
public void TwoNodesCanJoinToTheCluster()
{
    // Starts the second node and polls the master until both nodes own an
    // equal share (4096 segments each) of the geometry, or times out.
    storageHostB.Start();
    int countOfSegmentsInA = 0;
    int countOfSegmentsInB = 0;
    var masterProxy = new DistributedHashTableMasterClient(masterUri);
    for (int i = 0; i < 50; i++)
    {
        Topology topology = masterProxy.GetTopology();
        // Count how many segments each endpoint currently owns.
        Dictionary<NodeEndpoint, int> results = topology.Segments
            .GroupBy(x => x.AssignedEndpoint)
            .ToDictionary(x => x.Key, x => x.Count());
        results.TryGetValue(storageHostA.Endpoint, out countOfSegmentsInA);
        results.TryGetValue(storageHostB.Endpoint, out countOfSegmentsInB);
        if (countOfSegmentsInA == countOfSegmentsInB &&
            countOfSegmentsInB == 4096)
        {
            return;
        }
        Thread.Sleep(500);
    }
    // Fixed typo in the failure message: "responsability" -> "responsibility".
    Assert.True(false,
        "Should have found two nodes sharing responsibility for the geometry: " +
        countOfSegmentsInA + " - " + countOfSegmentsInB);
}
public AfterRestart()
{
    // Bring up a master plus one storage node and wait for the node to be
    // assigned every segment, then restart the master so tests can verify
    // the topology survives a restart.
    host = new DistributedHashTableMasterHost();
    host.Start();
    using (var storageHost = new DistributedHashTableStorageHost(masterUri))
    {
        storageHost.Start();
        var master = new DistributedHashTableMasterClient(masterUri);
        for (; ; )
        {
            var topology = master.GetTopology();
            if (topology.Segments.All(x => x.AssignedEndpoint != null))
                break;
            Thread.Sleep(100);
        }
    }
    //restart
    host.Dispose();
    host = new DistributedHashTableMasterHost();
    host.Start();
}
public AfterRestart()
{
    // Start a master and a storage node, block until the full topology is
    // assigned to the node, then tear the master down and start a fresh one.
    host = new DistributedHashTableMasterHost();
    host.Start();
    using (var storageHost = new DistributedHashTableStorageHost(masterUri))
    {
        storageHost.Start();
        var master = new DistributedHashTableMasterClient(masterUri);
        bool fullyAssigned = false;
        while (fullyAssigned == false)
        {
            var topology = master.GetTopology();
            fullyAssigned = topology.Segments.All(x => x.AssignedEndpoint != null);
            if (fullyAssigned == false)
                Thread.Sleep(100);
        }
    }
    //restart
    host.Dispose();
    host = new DistributedHashTableMasterHost();
    host.Start();
}
public void NodeHaveJoinedMasterAutomatically()
{
    // The single storage node should have been assigned every segment
    // without any explicit join call from the test.
    var master = new DistributedHashTableMasterClient(masterUri);
    var currentTopology = master.GetTopology();
    var allOwnedByNode = currentTopology.Segments
        .All(segment => segment.AssignedEndpoint == storageHost.Endpoint);
    Assert.True(allOwnedByNode);
}
public void ShouldRetainPreviousTopology()
{
    // After the master restart performed in the fixture, every segment
    // should still have an assigned endpoint.
    var master = new DistributedHashTableMasterClient(masterUri);
    var currentTopology = master.GetTopology();
    var noSegmentIsOrphaned = currentTopology.Segments
        .All(segment => segment.AssignedEndpoint != null);
    Assert.True(noSegmentIsOrphaned);
}
public CanCommunicateWithMasterUsingClientProxy()
{
    // Spin up a master host and point a client proxy at its well-known
    // rhino.dht endpoint on port 2200.
    masterHost = new DistributedHashTableMasterHost();
    masterHost.Start();
    var masterEndpoint = new Uri("rhino.dht://localhost:2200");
    masterProxy = new DistributedHashTableMasterClient(masterEndpoint);
}
public void AfterBothNodesJoinedWillAutomaticallyReplicateToBackupNode()
{
    // A value written to node A should be replicated to node B (the backup)
    // automatically once both nodes are part of the topology.
    storageHostB.Start();
    var masterProxy = new DistributedHashTableMasterClient(masterUri);
    Topology topology;

    // Wait until node A is recorded as owner of exactly 4096 segments.
    for (int attempt = 0; attempt < 50; attempt++)
    {
        topology = masterProxy.GetTopology();
        int ownedByA = topology.Segments
            .Count(x => x.AssignedEndpoint == storageHostA.Endpoint);
        if (ownedByA == 4096)
            break;
        Thread.Sleep(500);
    }
    topology = masterProxy.GetTopology();
    int segment = topology.Segments
        .First(x => x.AssignedEndpoint == storageHostA.Endpoint).Index;

    RepeatWhileThereAreTopologyChangedErrors(() =>
    {
        using (var nodeA = new DistributedHashTableStorageClient(storageHostA.Endpoint))
        {
            nodeA.Put(topology.Version, new ExtendedPutRequest
            {
                Bytes = new byte[] { 2, 2, 0, 0 },
                Key = "abc",
                Segment = segment
            });
        }
    });

    RepeatWhileThereAreTopologyChangedErrors(() =>
    {
        using (var nodeB = new DistributedHashTableStorageClient(storageHostB.Endpoint))
        {
            topology = masterProxy.GetTopology();
            Value[][] values = null;
            // Poll node B until the replicated value arrives (or we give up).
            for (int attempt = 0; attempt < 100; attempt++)
            {
                values = nodeB.Get(topology.Version, new ExtendedGetRequest
                {
                    Key = "abc",
                    Segment = segment
                });
                if (values[0].Length != 0)
                    break;
                Thread.Sleep(250);
            }
            Assert.Equal(new byte[] { 2, 2, 0, 0 }, values[0][0].Data);
        }
    });
}
public void WillReplicateValuesToSecondJoin()
{
    var master = new DistributedHashTableMasterClient(masterUri);

    // Write a value to node A while it still owns segment 1.
    using (var nodeA = new DistributedHashTableStorageClient(storageHostA.Endpoint))
    {
        var topology = master.GetTopology();
        nodeA.Put(topology.Version, new ExtendedPutRequest
        {
            Bytes = new byte[] { 2, 2, 0, 0 },
            Key = "abc",
            Segment = 1
        });
    }

    storageHostB.Start(); //will replicate all odd segments here now

    // Wait until ownership of segment 1 has moved to node B.
    for (int attempt = 0; attempt < 500; attempt++)
    {
        var topology = master.GetTopology();
        if (topology.Segments[1].AssignedEndpoint == storageHostB.Endpoint)
            break;
        Thread.Sleep(500);
    }

    Value[][] values = null;
    RepeatWhileThereAreTopologyChangedErrors(() =>
    {
        using (var nodeB = new DistributedHashTableStorageClient(storageHostB.Endpoint))
        {
            var topology = master.GetTopology();
            values = nodeB.Get(topology.Version, new ExtendedGetRequest
            {
                Key = "abc",
                Segment = 1
            });
        }
    });
    Assert.Equal(new byte[] { 2, 2, 0, 0 }, values[0][0].Data);
}
public void WhenFinishedReplicatingWillTellTheReplicatorSo()
{
    // After the single stored value has been paged out, the next replication
    // page should report that the segment is done.
    using (var storage = new DistributedHashTableStorageClient(storageHost.Endpoint))
    {
        var topology = new DistributedHashTableMasterClient(masterUri).GetTopology();
        storage.Put(topology.Version, new ExtendedPutRequest
        {
            Bytes = new byte[] { 1, 2, 3 },
            Key = "test",
            Segment = 1,
        });

        var page = storage.ReplicateNextPage(NodeEndpoint.ForTest(13), ReplicationType.Ownership, 1);
        Assert.Equal("test", page.PutRequests[0].Key);

        // Nothing left to replicate, so this page must be marked Done.
        page = storage.ReplicateNextPage(NodeEndpoint.ForTest(13), ReplicationType.Ownership, 1);
        Assert.True(page.Done);
    }
}
public void WhenReplicatingEmptySegmentsWillNotReplicateSegmentsThatHasValues()
{
    // Segment 1 holds a value, so assigning "all empty segments" out of
    // {1, 2, 3} should hand over only 2 and 3.
    using (var storage = new DistributedHashTableStorageClient(storageHost.Endpoint))
    {
        var topology = new DistributedHashTableMasterClient(masterUri).GetTopology();
        storage.Put(topology.Version, new ExtendedPutRequest
        {
            Bytes = new byte[] { 1, 2, 3 },
            Key = "test",
            Segment = 1,
        });

        var requestedSegments = new[] { 1, 2, 3 };
        var assignedSegments = storage.AssignAllEmptySegments(
            NodeEndpoint.ForTest(13),
            ReplicationType.Ownership,
            requestedSegments);
        Assert.Equal(new[] { 2, 3 }, assignedSegments);
    }
}
public void AfterTwoNodesJoinTheClusterEachSegmentHasBackup()
{
    // Once the second node joins, every segment should eventually list at
    // least one backup endpoint.
    storageHostB.Start();
    var masterProxy = new DistributedHashTableMasterClient(masterUri);
    Topology topology;
    for (int attempt = 0; attempt < 50; attempt++)
    {
        topology = masterProxy.GetTopology();
        if (topology.Segments.All(x => x.Backups.Count > 0))
            break;
        Thread.Sleep(500);
    }
    // Re-read and assert on a fresh topology so the failure message reflects
    // the final state, not a stale mid-loop snapshot.
    topology = masterProxy.GetTopology();
    Assert.True(topology.Segments.All(x => x.Backups.Count > 0));
}
public void AfterTwoNodesJoinTheClusterEachSegmentHasBackup()
{
    // Starting the second node should eventually give every segment at
    // least one backup; poll for up to 25 seconds.
    storageHostB.Start();
    var masterProxy = new DistributedHashTableMasterClient(masterUri);
    Topology topology;
    int remainingAttempts = 50;
    while (remainingAttempts-- > 0)
    {
        topology = masterProxy.GetTopology();
        bool everySegmentBackedUp = topology.Segments.All(x => x.Backups.Count > 0);
        if (everySegmentBackedUp)
            break;
        Thread.Sleep(500);
    }
    topology = masterProxy.GetTopology();
    Assert.True(topology.Segments.All(x => x.Backups.Count > 0));
}
public void WillReplicateValuesToSecondJoin()
{
    var masterClient = new DistributedHashTableMasterClient(masterUri);

    // Store a value on node A before the second node exists.
    using (var nodeA = new DistributedHashTableStorageClient(storageHostA.Endpoint))
    {
        Topology initialTopology = masterClient.GetTopology();
        nodeA.Put(initialTopology.Version, new ExtendedPutRequest
        {
            Bytes = new byte[] { 2, 2, 0, 0 },
            Key = "abc",
            Segment = 1
        });
    }

    storageHostB.Start(); //will replicate all odd segments here now

    // Poll until segment 1 has been reassigned to node B.
    for (int attempt = 0; attempt < 500; attempt++)
    {
        Topology currentTopology = masterClient.GetTopology();
        if (currentTopology.Segments[1].AssignedEndpoint == storageHostB.Endpoint)
            break;
        Thread.Sleep(500);
    }

    Value[][] values = null;
    RepeatWhileThereAreTopologyChangedErrors(() =>
    {
        using (var nodeB = new DistributedHashTableStorageClient(storageHostB.Endpoint))
        {
            Topology latestTopology = masterClient.GetTopology();
            values = nodeB.Get(latestTopology.Version, new ExtendedGetRequest
            {
                Key = "abc",
                Segment = 1
            });
        }
    });
    Assert.Equal(new byte[] { 2, 2, 0, 0 }, values[0][0].Data);
}
public void WhenReplicatingEmptySegmentsWillNotReplicateSegmentsThatHasValues()
{
    // A segment that already holds data must be excluded from a bulk
    // "assign all empty segments" operation.
    using (var storage = new DistributedHashTableStorageClient(storageHost.Endpoint))
    {
        var master = new DistributedHashTableMasterClient(masterUri);
        var topology = master.GetTopology();

        // Give segment 1 a value so it is no longer empty.
        storage.Put(topology.Version, new ExtendedPutRequest
        {
            Bytes = new byte[] { 1, 2, 3 },
            Key = "test",
            Segment = 1,
        });

        var candidates = new[] { 1, 2, 3 };
        var assigned = storage.AssignAllEmptySegments(
            NodeEndpoint.ForTest(13), ReplicationType.Ownership, candidates);

        // Only the truly empty segments (2 and 3) should be handed over.
        Assert.Equal(new[] { 2, 3 }, assigned);
    }
}
public void AfterBothNodesJoinedWillAutomaticallyReplicateToBackupNode()
{
    // After node B joins, data written through node A should become readable
    // from node B without any explicit replication request.
    storageHostB.Start();
    var masterProxy = new DistributedHashTableMasterClient(masterUri);
    Topology topology;

    // Poll until node A owns its expected half of the geometry (4096 segments).
    for (int waitRound = 0; waitRound < 50; waitRound++)
    {
        topology = masterProxy.GetTopology();
        int segmentsOwnedByA = topology.Segments
            .Count(x => x.AssignedEndpoint == storageHostA.Endpoint);
        if (segmentsOwnedByA == 4096)
            break;
        Thread.Sleep(500);
    }
    topology = masterProxy.GetTopology();
    int segment = topology.Segments
        .First(x => x.AssignedEndpoint == storageHostA.Endpoint).Index;

    RepeatWhileThereAreTopologyChangedErrors(() =>
    {
        using (var nodeA = new DistributedHashTableStorageClient(storageHostA.Endpoint))
        {
            nodeA.Put(topology.Version, new ExtendedPutRequest
            {
                Bytes = new byte[] { 2, 2, 0, 0 },
                Key = "abc",
                Segment = segment
            });
        }
    });

    RepeatWhileThereAreTopologyChangedErrors(() =>
    {
        using (var nodeB = new DistributedHashTableStorageClient(storageHostB.Endpoint))
        {
            topology = masterProxy.GetTopology();
            Value[][] values = null;
            // Replication is asynchronous; keep polling node B for the value.
            for (int waitRound = 0; waitRound < 100; waitRound++)
            {
                values = nodeB.Get(topology.Version, new ExtendedGetRequest
                {
                    Key = "abc",
                    Segment = segment
                });
                if (values[0].Length != 0)
                    break;
                Thread.Sleep(250);
            }
            Assert.Equal(new byte[] { 2, 2, 0, 0 }, values[0][0].Data);
        }
    });
}
public void TwoNodesCanJoinToTheCluster()
{
    // Starts node B and polls the master until segment ownership is split
    // evenly between the two nodes (4096 each); fails after 50 attempts.
    storageHostB.Start();
    int countOfSegmentsInA = 0;
    int countOfSegmentsInB = 0;
    var masterProxy = new DistributedHashTableMasterClient(masterUri);
    for (int i = 0; i < 50; i++)
    {
        Topology topology = masterProxy.GetTopology();
        // Tally segment ownership per endpoint.
        Dictionary<NodeEndpoint, int> results = topology.Segments
            .GroupBy(x => x.AssignedEndpoint)
            .ToDictionary(x => x.Key, x => x.Count());
        results.TryGetValue(storageHostA.Endpoint, out countOfSegmentsInA);
        results.TryGetValue(storageHostB.Endpoint, out countOfSegmentsInB);
        if (countOfSegmentsInA == countOfSegmentsInB &&
            countOfSegmentsInB == 4096)
        {
            return;
        }
        Thread.Sleep(500);
    }
    // Fixed typo in the failure message: "responsability" -> "responsibility".
    Assert.True(false,
        "Should have found two nodes sharing responsibility for the geometry: " +
        countOfSegmentsInA + " - " + countOfSegmentsInB);
}