Example #1
        public Segment[] Join(NodeEndpoint endpoint)
        {
            return(Execute((writer, stream) =>
            {
                writer.Write(new MasterMessageUnion.Builder
                {
                    Type = MasterMessageType.JoinRequest,
                    JoinRequest = new JoinRequestMessage.Builder
                    {
                        EndpointJoining = new Protocol.NodeEndpoint.Builder
                        {
                            Async = endpoint.Async.ToString(),
                            Sync = endpoint.Sync.ToString()
                        }.Build()
                    }.Build()
                }.Build());
                writer.Flush();
                stream.Flush();

                var union = ReadReply(MasterMessageType.JoinResult, stream);

                var response = union.JoinResponse;

                return response.SegmentsList.Select(x => x.GetSegment()).ToArray();
            }));
        }
        /// <summary>
        /// Notify the master that the endpoint has caught up on all the specified segments
        /// </summary>
        public void CaughtUp(NodeEndpoint endpoint,
                             ReplicationType type,
                             params int[] caughtUpSegments)
        {
            var matchingSegments = GetMatchingSegments(caughtUpSegments, endpoint);

            var modifiedSegments = from segment in Segments
                                   join caughtUpSegment in matchingSegments
                                       on segment.Index equals caughtUpSegment.Index into maybeMatchingSegment
                                   select new MatchSegment
                                   {
                                       Segment = segment,
                                       Matching = maybeMatchingSegment
                                   };

            switch (type)
            {
            case ReplicationType.Ownership:
                CaughtUpOnOwnership(modifiedSegments, endpoint);
                break;

            case ReplicationType.Backup:
                CaughtUpOnBackups(modifiedSegments, endpoint);
                break;

            default:
                throw new InvalidOperationException("Unknown replication type: " + type);
            }
            LogCurrentSegmentAssignment();
            TopologyChanged();
        }
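As a rough illustration of the sequence implied by the summary above, here is a hedged usage sketch (not part of the project source): a node joins, replicates the segments it was told to pull, and then reports them as caught up. It reuses DistributedHashTableMaster, NodeEndpoint.ForTest, Join and CaughtUp exactly as the test fixtures later in this listing do; the replication step itself is elided.

            // Sketch only: mirrors the test constructors shown further down.
            var master = new DistributedHashTableMaster();
            var endpoint = NodeEndpoint.ForTest(1);

            // Join returns the segments this node must pull from their current owners.
            var segments = master.Join(endpoint);

            // ... replicate the data for those segments from the assigned endpoints ...

            // Reporting the segments as caught up transfers ownership and bumps the
            // topology version (see CaughtUpOnOwnership further down in this listing).
            master.CaughtUp(endpoint, ReplicationType.Ownership,
                            segments.Select(x => x.Index).ToArray());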
        public DistributedHashTableStorageHost(
            Uri master,
            string name,
            int port)
        {
            Endpoint = new NodeEndpoint
            {
                Sync = new Uri("rhino.dht://" + Environment.MachineName + ":" + port + "/"),
                Async = new Uri("rhino.queues://" + Environment.MachineName + ":" + (port + 1) + "/replication")
            };
            queueManager = new QueueManager(new IPEndPoint(IPAddress.Any, port + 1), name + ".queue.esent");
            queueManager.CreateQueues("replication");
            node = new DistributedHashTableNode(
                new DistributedHashTableMasterClient(master),
                new ThreadPoolExecuter(),
                new BinaryMessageSerializer(),
                Endpoint,
                queueManager,
                new NonPooledDistributedHashTableNodeFactory()
                );
            var dhtStorage = new DistributedHashTableStorage(name + ".data.esent", node);
            replication = dhtStorage.Replication;
            storage = dhtStorage;

            listener = new TcpListener(
                Socket.OSSupportsIPv6 ? IPAddress.IPv6Any : IPAddress.Any,
                port);
        }
Example #4
            public void CanGiveUpOnSegment()
            {
                var existingEndpoint = new NodeEndpoint
                {
                    Async = new Uri("rhino.queues://other:2202/replication"),
                    Sync = new Uri("rhino.dht://other:2201")
                };
                masterProxy.Join(existingEndpoint);

                var newEndpoint = new NodeEndpoint
                {
                    Async = new Uri("rhino.queues://localhost:2202/replication"),
                    Sync = new Uri("rhino.dht://localhost:2201")
                };

                var segments = masterProxy.Join(newEndpoint);

                masterProxy.GaveUp(newEndpoint, ReplicationType.Ownership, segments[0].Index, segments[1].Index);

                var topology = masterProxy.GetTopology();
                Assert.Equal(existingEndpoint, topology.Segments[segments[0].Index].AssignedEndpoint);
                Assert.Equal(existingEndpoint, topology.Segments[segments[1].Index].AssignedEndpoint);

                Assert.Null(topology.Segments[segments[0].Index].InProcessOfMovingToEndpoint);
                Assert.Null(topology.Segments[segments[1].Index].InProcessOfMovingToEndpoint);
            }
Example #5
 public IDistributedHashTableStorage Create(NodeEndpoint endpoint)
 {
     PooledDistributedHashTableStorageClientConnection storage = null;
     lock (locker)
     {
         LinkedList<PooledDistributedHashTableStorageClientConnection> value;
         if (pooledConnections.TryGetValue(endpoint, out value) && value.Count > 0)
         {
             storage = value.First.Value;
             value.RemoveFirst();
         }
     }
     if (storage != null)
     {
         if (storage.Connected == false)
         {
             log.DebugFormat("Found unconnected connection in the pool for {0}", endpoint.Sync);
             try
             {
                 storage.Dispose();
             }
             catch (Exception e)
             {
                 log.Debug("Error when disposing unconnected connection in the pool", e);
             }
         }
         else
         {
             return storage;
         }
     }
     log.DebugFormat("Creating new connection in the pool to {0}", endpoint.Sync);
     return new PooledDistributedHashTableStorageClientConnection(this, endpoint);
 }
 public DistributedHashTableStorageClient(NodeEndpoint endpoint)
 {
     this.endpoint = endpoint;
     Client = new TcpClient(endpoint.Sync.Host, endpoint.Sync.Port);
     stream = Client.GetStream();
     writer = new MessageStreamWriter<StorageMessageUnion>(stream);
 }
        public ReplicationResult ReplicateNextPage(NodeEndpoint replicationEndpoint,
                                                   ReplicationType type,
                                                   int segment)
        {
            writer.Write(new StorageMessageUnion.Builder
            {
                Type = StorageMessageType.ReplicateNextPageRequest,
                ReplicateNextPageRequest = new ReplicateNextPageRequestMessage.Builder
                {
                    ReplicationEndpoint = replicationEndpoint.GetNodeEndpoint(),
                    Segment             = segment,
                    Type = type == ReplicationType.Backup? Protocol.ReplicationType.Backup : Protocol.ReplicationType.Ownership
                }.Build()
            }.Build());
            writer.Flush();
            stream.Flush();

            var union = ReadReply(StorageMessageType.ReplicateNextPageResponse);

            return(new ReplicationResult
            {
                Done = union.ReplicateNextPageResponse.Done,
                PutRequests = union.ReplicateNextPageResponse.PutRequestsList.Select(
                    x => x.GetPutRequest()
                    ).ToArray(),
                RemoveRequests = union.ReplicateNextPageResponse.RemoveRequestsList.Select(
                    x => x.GetRemoveRequest()
                    ).ToArray()
            });
        }
 public DistributedHashTableStorageClient(NodeEndpoint endpoint)
 {
     this.endpoint = endpoint;
     Client        = new TcpClient(endpoint.Sync.Host, endpoint.Sync.Port);
     stream        = Client.GetStream();
     writer        = new MessageStreamWriter<StorageMessageUnion>(stream);
 }
        private Segment[] RestructureSegmentsFairly(NodeEndpoint point)
        {
            var newSegments = new List<Segment>();
            var index       = 0;

            foreach (var segment in Segments)
            {
                index += 1;

                if (segment.InProcessOfMovingToEndpoint != null)
                {
                    newSegments.Add(segment);
                    continue;
                }
                if (index % endpoints.Count == 0)
                {
                    newSegments.Add(new Segment
                    {
                        AssignedEndpoint            = segment.AssignedEndpoint,
                        InProcessOfMovingToEndpoint = point,
                        Index          = segment.Index,
                        PendingBackups = segment.PendingBackups
                    });
                }
                else
                {
                    newSegments.Add(segment);
                }
            }
            // this does NOT create a new topology version
            Topology = new Topology(newSegments.ToArray(), Topology.Version);
            return(Segments.Where(x => x.BelongsTo(point)).ToArray());
        }
        public DistributedHashTableStorageHost(
            Uri master,
            string name,
            int port)
        {
            Endpoint = new NodeEndpoint
            {
                Sync  = new Uri("rhino.dht://" + Environment.MachineName + ":" + port + "/"),
                Async = new Uri("rhino.queues://" + Environment.MachineName + ":" + (port + 1) + "/replication")
            };
            queueManager = new QueueManager(new IPEndPoint(IPAddress.Any, port + 1), name + ".queue.esent");
            queueManager.CreateQueues("replication");
            node = new DistributedHashTableNode(
                new DistributedHashTableMasterClient(master),
                new ThreadPoolExecuter(),
                new BinaryMessageSerializer(),
                Endpoint,
                queueManager,
                new NonPooledDistributedHashTableNodeFactory()
                );
            var dhtStorage = new DistributedHashTableStorage(name + ".data.esent", node);

            replication = dhtStorage.Replication;
            storage     = dhtStorage;

            listener = new TcpListener(
                Socket.OSSupportsIPv6 ? IPAddress.IPv6Any : IPAddress.Any,
                port);
        }
Example #11
        public void CaughtUp(NodeEndpoint endpoint,
                             ReplicationType type,
                             params int[] caughtUpSegments)
        {
            Execute((writer,
                     stream) =>
            {
                writer.Write(new MasterMessageUnion.Builder
                {
                    Type     = MasterMessageType.CaughtUpRequest,
                    CaughtUp = new CaughtUpRequestMessage.Builder
                    {
                        CaughtUpSegmentsList = { caughtUpSegments },
                        Type     = type == ReplicationType.Backup ? Protocol.ReplicationType.Backup : Protocol.ReplicationType.Ownership,
                        Endpoint = new Protocol.NodeEndpoint.Builder
                        {
                            Async = endpoint.Async.ToString(),
                            Sync  = endpoint.Sync.ToString()
                        }.Build()
                    }.Build()
                }.Build());
                writer.Flush();
                stream.Flush();

                ReadReply(MasterMessageType.CaughtUpResponse, stream);
            });
        }
Example #12
 public HandlingAssignedSegments()
 {
     master = MockRepository.GenerateStub<IDistributedHashTableMaster>();
     executer = MockRepository.GenerateStub<IExecuter>();
     endPoint = NodeEndpoint.ForTest(1);
     node = new DistributedHashTableNode(master, executer, new BinaryMessageSerializer(), endPoint, MockRepository.GenerateStub<IQueueManager>(),
         MockRepository.GenerateStub<IDistributedHashTableNodeReplicationFactory>());
 }
        /// <summary>
        /// This method is called when a new node wants to join the cluster.
        /// The result is the segments that this node is responsible for, if it is an
        /// existing one, or the list of segments that it needs to pull from the currently
        /// assigned node.
        /// Note: if it needs to pull data from the currently assigned node, it will
        /// also need to call the <see cref="CaughtUp"/> method to let the master know
        /// that it is done and that the topology changed.
        /// </summary>
        public Segment[] Join(NodeEndpoint endpoint)
        {
            var newlyAlocatedSegments = JoinInternal(endpoint);

            RearsegmentBackups();
            LogCurrentSegmentAssignment();
            return(newlyAlocatedSegments);
        }
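A hedged sketch of how a joining node might act on that result, including the failure path exercised by CanGiveUpOnSegment earlier in this listing; the types and calls (DistributedHashTableMaster, NodeEndpoint.ForTest, Join, CaughtUp, GaveUp) all appear elsewhere in these examples, while the replication step itself is elided.

            // Sketch only: segments already assigned to us need no work; segments
            // still owned by another endpoint must be pulled and then either
            // promoted (CaughtUp) or handed back (GaveUp).
            var master = new DistributedHashTableMaster();
            var endpoint = NodeEndpoint.ForTest(2);
            var segments = master.Join(endpoint);

            var pulled = new List<int>();
            var failed = new List<int>();
            foreach (var segment in segments)
            {
                if (endpoint.Equals(segment.AssignedEndpoint))
                    continue; // already ours (first node in the cluster, or a restart)

                // ... replicate segment.Index from segment.AssignedEndpoint,
                //     adding the index to 'pulled' on success or 'failed' otherwise ...
            }

            if (pulled.Count > 0)
                master.CaughtUp(endpoint, ReplicationType.Ownership, pulled.ToArray());
            if (failed.Count > 0)
                master.GaveUp(endpoint, ReplicationType.Ownership, failed.ToArray());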
Example #14
 public bool Equals(NodeEndpoint other)
 {
     if (other == null)
     {
         return(false);
     }
     return(Sync == other.Sync && Async == other.Async);
 }
Example #15
 public OnGaveUp()
 {
     master = new DistributedHashTableMaster();
     master.CaughtUp(NodeEndpoint.ForTest(9),
                     ReplicationType.Ownership,
                     master.Join(NodeEndpoint.ForTest(9)).Select(x => x.Index).ToArray());
     endPoint = NodeEndpoint.ForTest(5);
 }
Example #16
            public OnMasterWithOneExistingNode()
            {
                master = new DistributedHashTableMaster();
                endPoint = NodeEndpoint.ForTest(9);

                var existingEndpoint = NodeEndpoint.ForTest(3);
                var ranges = master.Join(existingEndpoint);
                master.CaughtUp(existingEndpoint, ReplicationType.Ownership, ranges.Select(x => x.Index).ToArray());
            }
        private Segment[] GetMatchingSegments(IEnumerable<int> segments,
                                              NodeEndpoint endpoint)
        {
            var matchingSegments = segments.Select(i => Segments[i]).ToArray();

            var segmentsNotBeloningToThespecifiedEndpoint = matchingSegments
                                                            .Where(x => x.InProcessOfMovingToEndpoint != null)
                                                            .Where(x => endpoint.Equals(x.InProcessOfMovingToEndpoint) == false &&
                                                                   x.PendingBackups.Contains(endpoint) == false);

            if (segmentsNotBeloningToThespecifiedEndpoint.Count() != 0)
            {
                throw new InvalidOperationException("Could not catch up or give up on segments that belong to another endpoint");
            }
            return(matchingSegments);
        }
 public OnlineSegmentReplicationCommandTest()
 {
     node = MockRepository.GenerateStub<IDistributedHashTableNode>();
     replication = MockRepository.GenerateStub<IDistributedHashTableNodeReplication>();
     endpoint = NodeEndpoint.ForTest(1);
     node.Stub(x => x.Endpoint).Return(NodeEndpoint.ForTest(2));
     storage = MockRepository.GenerateStub<IDistributedHashTableStorage>();
     node.Storage = storage;
     node.Stub(x => x.GetTopologyVersion()).Return(topologyVersion);
     var factory = MockRepository.GenerateStub<IDistributedHashTableNodeReplicationFactory>();
     factory.Stub(x => x.Create(null)).IgnoreArguments().Return(replication);
     command = new OnlineSegmentReplicationCommand(
         endpoint,
         new[] { new Segment { Index = 0 }, new Segment { Index = 1 }, },
         ReplicationType.Ownership,
         node,
         factory);
 }
        public void GaveUp(NodeEndpoint endpoint,
                           ReplicationType type,
                           params int[] segmentsGivingUpOn)
        {
            var matchingSegments = GetMatchingSegments(segmentsGivingUpOn, endpoint);

            foreach (var segment in matchingSegments)
            {
                if (type == ReplicationType.Ownership)
                {
                    segment.InProcessOfMovingToEndpoint = null;
                }
                else
                {
                    segment.PendingBackups.Remove(endpoint);
                }
            }
        }
Example #20
 public DistributedHashTableNode(IDistributedHashTableMaster master,
                                 IExecuter executer,
                                 IMessageSerializer messageSerializer,
                                 NodeEndpoint endpoint,
                                 IQueueManager queueManager,
                                 IDistributedHashTableNodeReplicationFactory replicationFactory)
 {
     this.master             = master;
     this.executer           = executer;
     this.messageSerializer  = messageSerializer;
     this.endpoint           = endpoint;
     this.queueManager       = queueManager;
     this.replicationFactory = replicationFactory;
     State = NodeState.NotStarted;
     backgroundReplication = new Thread(BackgroundReplication)
     {
         IsBackground = true
     };
 }
Example #21
 public DistributedHashTableNode(IDistributedHashTableMaster master,
     IExecuter executer,
     IMessageSerializer messageSerializer,
     NodeEndpoint endpoint,
     IQueueManager queueManager,
     IDistributedHashTableNodeReplicationFactory replicationFactory)
 {
     this.master = master;
     this.executer = executer;
     this.messageSerializer = messageSerializer;
     this.endpoint = endpoint;
     this.queueManager = queueManager;
     this.replicationFactory = replicationFactory;
     State = NodeState.NotStarted;
     backgroundReplication = new Thread(BackgroundReplication)
     {
         IsBackground = true
     };
 }
        public int[] AssignAllEmptySegments(NodeEndpoint replicationEndpoint,
                                            ReplicationType type, int[] segments)
        {
            writer.Write(new StorageMessageUnion.Builder
            {
                Type = StorageMessageType.AssignAllEmptySegmentsRequest,
                AssignAllEmptySegmentsRequest = new AssignAllEmptySegmentsRequestMessage.Builder
                {
                    ReplicationEndpoint = replicationEndpoint.GetNodeEndpoint(),
                    Type         = type == ReplicationType.Backup? Protocol.ReplicationType.Backup : Protocol.ReplicationType.Ownership,
                    SegmentsList = { segments }
                }.Build()
            }.Build());
            writer.Flush();
            stream.Flush();

            var union = ReadReply(StorageMessageType.AssignAllEmptySegmentsResponse);

            return(union.AssignAllEmptySegmentsResponse.AssignedSegmentsList.ToArray());
        }
        public int[] AssignAllEmptySegments(NodeEndpoint replicationEndpoint,
            ReplicationType type,   int[] segments)
        {
            writer.Write(new StorageMessageUnion.Builder
            {
                Type = StorageMessageType.AssignAllEmptySegmentsRequest,
                AssignAllEmptySegmentsRequest = new AssignAllEmptySegmentsRequestMessage.Builder
                {
                    ReplicationEndpoint = replicationEndpoint.GetNodeEndpoint(),
                    Type = type == ReplicationType.Backup? Protocol.ReplicationType.Backup : Protocol.ReplicationType.Ownership,
                    SegmentsList = { segments }
                }.Build()
            }.Build());
            writer.Flush();
            stream.Flush();

            var union = ReadReply(StorageMessageType.AssignAllEmptySegmentsResponse);

            return union.AssignAllEmptySegmentsResponse.AssignedSegmentsList.ToArray();
        }
Example #24
        private static void AssertSegmentNotMoved(PersistentHashTableActions actions,
                                                  int? segment)
        {
            if (segment < 0)
            {
                throw new ArgumentOutOfRangeException("segment", "Segment cannot be negative");
            }

            var values = actions.Get(new GetRequest
            {
                Key = Constants.MovedSegment + segment
            });

            if (values.Length > 0)
            {
                throw new SeeOtherException("This key belongs to a segment assigned to another node")
                      {
                          Endpoint = NodeEndpoint.FromBytes(values[0].Data)
                      };
            }
        }
        public int[] AssignAllEmptySegments(NodeEndpoint replicationEndpoint,
            ReplicationType type,
            int[] segments)
        {
            var reservedSegments = new List<int>();

            hashTable.Batch(actions =>
            {
                foreach (var segment in segments)
                {
                    if (actions.HasTag(segment))
                        continue;
                    if (type == ReplicationType.Ownership &&
                        MarkSegmentAsAssignedToEndpoint(actions, replicationEndpoint, segment) == false)
                        continue;
                    reservedSegments.Add(segment);
                }
                actions.Commit();
            });

            return reservedSegments.ToArray();
        }
 private void CaughtUpOnOwnership(IEnumerable<MatchSegment> modifiedSegments,
                                  NodeEndpoint endpoint)
 {
     Topology = new Topology((
                                 from modifiedSegment in modifiedSegments
                                 let x = modifiedSegment.Matching.FirstOrDefault()
                                 select x == null
                                     ? modifiedSegment.Segment
                                     : new Segment
                                     {
                                         Index = x.Index,
                                         InProcessOfMovingToEndpoint = null,
                                         AssignedEndpoint = endpoint,
                                         PendingBackups = x.PendingBackups
                                             .Append(x.AssignedEndpoint)
                                             .Where(e => e != endpoint)
                                             .ToSet()
                                     }).ToArray(),
                             Topology.Version + 1
                             );
     RearsegmentBackups();
 }
        private Segment[] JoinInternal(NodeEndpoint endpoint)
        {
            log.DebugFormat("Endpoint {0} joining", endpoint.Sync);
            endpoints.Add(endpoint);
            if (Segments.Any(x => x.BelongsTo(endpoint)))
            {
                log.DebugFormat("Endpoint {0} is already registered, probably an end point restart, ignoring", endpoint.Sync);
                return(Segments.Where(x => x.BelongsTo(endpoint)).ToArray());
            }

            var thereAreSegementsWithNoowners = Segments
                                                .Any(x => x.AssignedEndpoint == null);

            if (thereAreSegementsWithNoowners)
            {
                Topology = new Topology(Segments
                                        .Where(x => x.AssignedEndpoint == null)
                                        .Select(x => new Segment
                {
                    AssignedEndpoint            = endpoint,
                    Backups                     = x.Backups,
                    Index                       = x.Index,
                    InProcessOfMovingToEndpoint = x.InProcessOfMovingToEndpoint,
                    PendingBackups              = x.PendingBackups
                }).ToArray(),
                                        Topology.Version + 1
                                        );
                TopologyChanged();
                log.DebugFormat("Endpoint {0} was assigned all segments without owners", endpoint.Sync);
                return(Segments.Where(x => x.AssignedEndpoint == endpoint).ToArray());
            }

            log.DebugFormat("New endpoint {0}, allocating segments for it", endpoint.Sync);

            return(RestructureSegmentsFairly(endpoint));
        }
Example #28
 public void CanJoinToMaster()
 {
     var endpoint = new NodeEndpoint
     {
         Async = new Uri("rhino.queues://localhost:2202/replication"),
         Sync = new Uri("rhino.dht://localhost:2201")
     };
     var segments = masterProxy.Join(endpoint);
     Assert.Equal(Constants.NumberOfSegments, segments.Length);
     Assert.True(segments.All(x => x.AssignedEndpoint.Equals(endpoint)));
 }
Example #29
 public bool IsOwnedBy(NodeEndpoint endpoint,
     int segment)
 {
     return GetSegment(segment).AssignedEndpoint == endpoint;
 }
Example #30
 public bool Equals(NodeEndpoint other)
 {
     if(other == null)
         return false;
     return Sync == other.Sync && Async == other.Async;
 }
Example #31
 private bool[] GetRemovesResults(NodeEndpoint endpoint,
     ExtendedRemoveRequest[] removeRequests,
     int backupIndex)
 {
     try
     {
         using (var client = pool.Create(endpoint))
         {
             return client.Remove(topology.Version, removeRequests);
         }
     }
     catch (SeeOtherException soe)
     {
         return GetRemovesResults(soe.Endpoint, removeRequests, backupIndex);
     }
     catch (TopologyVersionDoesNotMatchException)
     {
         RefreshTopology();
         return RemoveInternal(removeRequests, backupIndex);
     }
     catch (Exception)
     {
         try
         {
             return RemoveInternal(removeRequests, backupIndex + 1);
         }
         catch (NoMoreBackupsException)
         {
         }
         throw;
     }
 }
Example #32
 private Value[][] GetGetsResults(NodeEndpoint endpoint,
     ExtendedGetRequest[] getRequests,
     int backupIndex)
 {
     try
     {
         using (var client = pool.Create(endpoint))
         {
             return client.Get(topology.Version, getRequests);
         }
     }
     catch (SeeOtherException soe)
     {
         return GetGetsResults(soe.Endpoint, getRequests, backupIndex);
     }
     catch (TopologyVersionDoesNotMatchException)
     {
         RefreshTopology();
         return GetInternal(getRequests, backupIndex);
     }
     catch (Exception)
     {
         try
         {
             return GetInternal(getRequests, backupIndex + 1);
         }
         catch (NoMoreBackupsException)
         {
         }
         throw;
     }
 }
 private static bool MarkSegmentAsAssignedToEndpoint(PersistentHashTableActions actions,
     NodeEndpoint endpoint,
     int segment)
 {
     var result = actions.Put(new PutRequest
     {
         Key = Constants.MovedSegment + segment,
         OptimisticConcurrency = true,
         Bytes = endpoint.ToBytes(),
     });
     return result.ConflictExists == false;
 }
Example #34
 public OnCaughtUp()
 {
     master = new DistributedHashTableMaster();
     endPoint = NodeEndpoint.ForTest(9);
 }
 IDistributedHashTableNodeReplication IDistributedHashTableNodeReplicationFactory.Create(NodeEndpoint endpoint)
 {
     return new DistributedHashTableStorageClient(endpoint).Replication;
 }
 IDistributedHashTableRemoteNode IDistributedHashTableRemoteNodeFactory.Create(NodeEndpoint endpoint)
 {
     return new DistributedHashTableStorageClient(endpoint);
 }
Example #37
 public bool BelongsTo(NodeEndpoint endpoint)
 {
     return endpoint.Equals(AssignedEndpoint) ||
            endpoint.Equals(InProcessOfMovingToEndpoint);
 }
        public ReplicationResult ReplicateNextPage(NodeEndpoint replicationEndpoint,
            ReplicationType type,
            int segment)
        {
            var putRequests = new List<ExtendedPutRequest>();
            var removalRequests = new List<ExtendedRemoveRequest>();
            var done = false;
            hashTable.Batch(actions =>
            {
                foreach (var getRequest in actions.GetKeysForTag(segment))
                {
                    var alreadyReplicated = actions.HasReplicationInfo(getRequest.Key,
                                                                       getRequest.SpecifiedVersion,
                                                                       replicationEndpoint.GetHash());
                    if (alreadyReplicated)
                        continue;

                    var values = actions.Get(getRequest);
                    if (values.Length != 1)
                        continue;
                    var value = values[0];

                    putRequests.Add(new ExtendedPutRequest
                    {
                        Bytes = value.Data,
                        ExpiresAt = value.ExpiresAt,
                        IsReadOnly = value.ReadOnly,
                        IsReplicationRequest = true,
                        Key = value.Key,
                        ParentVersions = value.ParentVersions,
                        ReplicationTimeStamp = value.Timestamp,
                        ReplicationVersion = value.Version,
                        Tag = value.Tag,
                        Segment = value.Tag.Value,
                    });

                    actions.AddReplicationInfo(getRequest.Key,
                                               getRequest.SpecifiedVersion,
                                               replicationEndpoint.GetHash());

                    if (putRequests.Count >= 100)
                        break;
                }

                foreach (var request in actions.ConsumeRemovalReplicationInfo(replicationEndpoint.GetHash()))
                {
                    removalRequests.Add(new ExtendedRemoveRequest
                    {
                        Key = request.Key,
                        SpecificVersion = request.SpecificVersion
                    });
                    if (removalRequests.Count >= 100)
                        break;
                }

                done = putRequests.Count == 0 && removalRequests.Count == 0;
                if (done && type == ReplicationType.Ownership)
                {
                    MarkSegmentAsAssignedToEndpoint(actions, replicationEndpoint, segment);
                }

                actions.Commit();
            });

            return new ReplicationResult
            {
                PutRequests = putRequests.ToArray(),
                RemoveRequests = removalRequests.ToArray(),
                Done = done
            };
        }
Example #39
 public PooledDistributedHashTableStorageClientConnection(
     DefaultConnectionPool pool,
     NodeEndpoint endpoint)
     : base(endpoint)
 {
     this.pool = pool;
 }
 public void Decommision(NodeEndpoint endpoint)
 {
     throw new NotImplementedException();
 }
Example #41
 public bool IsOwnedBy(NodeEndpoint endpoint,
                       int segment)
 {
     return(GetSegment(segment).AssignedEndpoint == endpoint);
 }
        public ReplicationResult ReplicateNextPage(NodeEndpoint replicationEndpoint,
            ReplicationType type,
            int segment)
        {
            writer.Write(new StorageMessageUnion.Builder
            {
                Type = StorageMessageType.ReplicateNextPageRequest,
                ReplicateNextPageRequest = new ReplicateNextPageRequestMessage.Builder
                {
                    ReplicationEndpoint = replicationEndpoint.GetNodeEndpoint(),
                    Segment = segment,
                    Type = type == ReplicationType.Backup? Protocol.ReplicationType.Backup : Protocol.ReplicationType.Ownership
                }.Build()
            }.Build());
            writer.Flush();
            stream.Flush();

            var union = ReadReply(StorageMessageType.ReplicateNextPageResponse);

            return new ReplicationResult
            {
                Done = union.ReplicateNextPageResponse.Done,
                PutRequests = union.ReplicateNextPageResponse.PutRequestsList.Select(
                    x => x.GetPutRequest()
                    ).ToArray(),
                RemoveRequests = union.ReplicateNextPageResponse.RemoveRequestsList.Select(
                    x => x.GetRemoveRequest()
                    ).ToArray()
            };
        }
Example #43
 public bool BelongsTo(NodeEndpoint endpoint)
 {
     return(endpoint.Equals(AssignedEndpoint) ||
            endpoint.Equals(InProcessOfMovingToEndpoint));
 }
 public NotifyEndpointsAboutTopologyChange(NodeEndpoint[] endpoints,
     IDistributedHashTableRemoteNodeFactory distributedHashTableRemoteNodeFactory)
 {
     this.endpoints = endpoints;
     this.distributedHashTableRemoteNodeFactory = distributedHashTableRemoteNodeFactory;
 }
Example #45
 public JoiningMaster()
 {
     master = MockRepository.GenerateStub<IDistributedHashTableMaster>();
     executer = MockRepository.GenerateStub<IExecuter>();
     endPoint = NodeEndpoint.ForTest(1);
     master.Stub(x => x.Join(Arg.Is(endPoint)))
         .Return(new Segment[0]);
     node = new DistributedHashTableNode(master, executer, new BinaryMessageSerializer(), endPoint, MockRepository.GenerateStub<IQueueManager>(),
         MockRepository.GenerateStub<IDistributedHashTableNodeReplicationFactory>());
 }