/// <summary>
/// Builds a merge-data record holding the responding member's address together
/// with the view and digest it reported.
/// </summary>
public MergeData(Address sender, View view, Digest digest)
{
    this.view = view;
    this.digest = digest;
    this.sender = sender;
}
/// <summary>
/// Handles events travelling down the stack. For FIND_INITIAL_MBRS it performs member
/// discovery: pings every configured initial host with a GET_MBRS_REQ, waits up to
/// 'timeout' ms (or until 'num_initial_members' responses arrive), then answers the GMS
/// layer with FIND_INITIAL_MBRS_OK. View events refresh the local membership list,
/// GET_DIGEST is answered locally with a zeroed digest, and everything else is passed on.
/// </summary>
public override void down(Event evt)
{
    Message msg;
    long time_to_wait, start_time;

    switch (evt.Type)
    {
        case Event.FIND_INITIAL_MBRS: // sent by GMS layer, pass up a GET_MBRS_OK event
            //We pass this event down to tcp so that it can take some measures.
            passDown(evt);
            initial_members.Clear();
            msg = new Message(null, null, null);
            msg.putHeader(HeaderType.TCPPING, new PingHeader(PingHeader.GET_MBRS_REQ, (System.Object)local_addr,group_addr));

            // if intitial nodes have been specified and static is true, then only those
            // members will form the cluster, otherwise, nodes having the same IP Multicast and port
            // will form the cluster dyanamically.
            mbrDiscoveryInProcess = true;
            lock (members.SyncRoot)
            {
                if( initial_hosts != null)
                {
                    // One urgent unicast ping per configured initial host.
                    for (System.Collections.IEnumerator it = initial_hosts.GetEnumerator(); it.MoveNext(); )
                    {
                        Address addr = (Address) it.Current;
                        msg.Dest = addr;
                        if(Stack.NCacheLog.IsInfoEnabled) Stack.NCacheLog.Info("[FIND_INITIAL_MBRS] sending PING request to " + msg.Dest);
                        passDown(new Event(Event.MSG_URGENT, msg.copy(), Priority.Critical));
                    }
                }
            }

            // 2. Wait 'timeout' ms or until 'num_initial_members' have been retrieved
            if(Stack.NCacheLog.IsInfoEnabled) Stack.NCacheLog.Info("TcpPing.down()", "[FIND_INITIAL_MBRS] waiting for results...............");
            lock (initial_members.SyncRoot)
            {
                // (Ticks - 621355968000000000) / 10000 converts .NET ticks to Unix-epoch milliseconds.
                start_time = (System.DateTime.Now.Ticks - 621355968000000000) / 10000;
                time_to_wait = timeout;
                while (initial_members.Count < num_initial_members && time_to_wait > 0)
                {
                    try
                    {
                        if (Stack.NCacheLog.IsInfoEnabled) Stack.NCacheLog.Info("TcpPing.down()", "initial_members Count: " + initial_members.Count + "initialHosts Count: " + num_initial_members);
                        if (Stack.NCacheLog.IsInfoEnabled) Stack.NCacheLog.Info("TcpPing.down()", "Time to wait for next response: " + time_to_wait);
                        ///Big_clusterd: initial members will be pulsed in case connection is not available.
                        ///so here we dont have to wait till each member is timed out.
                        ///this significantly improves time for initial member discovery.
                        bool timeExpire = System.Threading.Monitor.Wait(initial_members.SyncRoot, TimeSpan.FromMilliseconds(time_to_wait));
                    }
                    catch (System.Exception e)
                    {
                        Stack.NCacheLog.Error("TCPPing.down(FIND_INITIAL_MBRS)", e.ToString());
                    }
                    // Recompute the remaining budget so wakeups before the deadline don't stretch the total wait.
                    time_to_wait = timeout - ((System.DateTime.Now.Ticks - 621355968000000000) / 10000 - start_time);
                }
                mbrDiscoveryInProcess = false;
            }

            if(Stack.NCacheLog.IsInfoEnabled) Stack.NCacheLog.Info("TcpPing.down()", "[FIND_INITIAL_MBRS] initial members are " + Global.CollectionToString(initial_members));
            if(Stack.NCacheLog.IsInfoEnabled) Stack.NCacheLog.Info("TcpPing.down()", "[FIND_INITIAL_MBRS] initial members count " + initial_members.Count);

            //remove those which are not functional due to twoPhaseConnect
            // (iterate backwards so RemoveAt doesn't shift unvisited indices)
            for (int i = initial_members.Count - 1; i >= 0; i--)
            {
                PingRsp rsp = initial_members[i] as PingRsp;
                if (!rsp.IsStarted) initial_members.RemoveAt(i);
            }

            // 3. Send response
            passUp(new Event(Event.FIND_INITIAL_MBRS_OK, initial_members));
            break;

        case Event.TMP_VIEW:
        case Event.VIEW_CHANGE:
            // Mirror the new view's membership into our local list before forwarding.
            System.Collections.ArrayList tmp;
            if ((tmp = ((View) evt.Arg).Members) != null)
            {
                lock (members.SyncRoot)
                {
                    members.Clear();
                    members.AddRange(tmp);
                }
            }
            passDown(evt);
            break;

        /****************************After removal of NackAck *********************************/
        //TCPPING emulates a GET_DIGEST call, which is required by GMS. This is needed
        //since we have now removed NAKACK from the stack!
        case Event.GET_DIGEST:
            // Answer directly with a digest containing every current member at seqno 0.
            pbcast.Digest digest = new pbcast.Digest(members.Count);
            for (int i = 0; i < members.Count; i++)
            {
                Address sender = (Address)members[i];
                digest.add(sender, 0, 0);
            }
            passUp(new Event(Event.GET_DIGEST_OK, digest));
            return;

        case Event.SET_DIGEST:
            // Not needed! Just here to let you know that it is needed by GMS!
            return;
        /********************************************************************************/

        case Event.BECOME_SERVER: // called after client has joined and is fully working group member
            if(Stack.NCacheLog.IsInfoEnabled) Stack.NCacheLog.Info("TcpPing.down()", "received BECOME_SERVER event");
            passDown(evt);
            is_server = true;
            break;

        case Event.CONNECT:
            // Arg layout: [0] group address, [1] sub-group address, [3] two-phase-connect flag.
            object[] addrs = ((object[])evt.Arg);
            group_addr = (string)addrs[0];
            subGroup_addr = (string)addrs[1];
            twoPhaseConnect = (bool)addrs[3];
            // timeout feeds the discovery wait loop above; two-phase connect uses a short 1s window.
            if (twoPhaseConnect) timeout = 1000;
            passDown(evt);
            break;

        case Event.DISCONNECT:
            passDown(evt);
            break;

        case Event.HAS_STARTED:
            hasStarted = true;
            passDown(evt);
            break;

        default:
            passDown(evt); // Pass on to the layer below us
            break;
    }
}
/// <summary>
/// Handles events travelling down the stack (braced variant of the discovery handler).
/// FIND_INITIAL_MBRS pings all configured initial hosts and waits up to 'timeout' ms
/// (or until 'num_initial_members' responses arrive) before answering the GMS layer with
/// FIND_INITIAL_MBRS_OK. View events refresh the local membership, GET_DIGEST is answered
/// locally with a zeroed digest, and all other events are passed on unchanged.
/// </summary>
public override void down(Event evt)
{
    Message msg;
    long time_to_wait, start_time;

    switch (evt.Type)
    {
        case Event.FIND_INITIAL_MBRS: // sent by GMS layer, pass up a GET_MBRS_OK event
            //We pass this event down to tcp so that it can take some measures.
            passDown(evt);
            initial_members.Clear();
            msg = new Message(null, null, null);
            msg.putHeader(HeaderType.TCPPING, new PingHeader(PingHeader.GET_MBRS_REQ, (System.Object)local_addr, group_addr));

            // if intitial nodes have been specified and static is true, then only those
            // members will form the cluster, otherwise, nodes having the same IP Multicast and port
            // will form the cluster dyanamically.
            mbrDiscoveryInProcess = true;
            lock (members.SyncRoot)
            {
                if (initial_hosts != null)
                {
                    // One urgent unicast ping per configured initial host.
                    for (System.Collections.IEnumerator it = initial_hosts.GetEnumerator(); it.MoveNext();)
                    {
                        Address addr = (Address)it.Current;
                        msg.Dest = addr;
                        if (Stack.NCacheLog.IsInfoEnabled)
                        {
                            Stack.NCacheLog.Info("[FIND_INITIAL_MBRS] sending PING request to " + msg.Dest);
                        }
                        passDown(new Event(Event.MSG_URGENT, msg.copy(), Priority.Critical));
                    }
                }
            }

            // 2. Wait 'timeout' ms or until 'num_initial_members' have been retrieved
            if (Stack.NCacheLog.IsInfoEnabled)
            {
                Stack.NCacheLog.Info("TcpPing.down()", "[FIND_INITIAL_MBRS] waiting for results...............");
            }
            lock (initial_members.SyncRoot)
            {
                // (Ticks - 621355968000000000) / 10000 converts .NET ticks to Unix-epoch milliseconds.
                start_time = (System.DateTime.Now.Ticks - 621355968000000000) / 10000;
                time_to_wait = timeout;
                while (initial_members.Count < num_initial_members && time_to_wait > 0)
                {
                    try
                    {
                        if (Stack.NCacheLog.IsInfoEnabled)
                        {
                            Stack.NCacheLog.Info("TcpPing.down()", "initial_members Count: " + initial_members.Count + "initialHosts Count: " + num_initial_members);
                        }
                        if (Stack.NCacheLog.IsInfoEnabled)
                        {
                            Stack.NCacheLog.Info("TcpPing.down()", "Time to wait for next response: " + time_to_wait);
                        }
                        ///Big_clusterd: initial members will be pulsed in case connection is not available.
                        ///so here we dont have to wait till each member is timed out.
                        ///this significantly improves time for initial member discovery.
                        bool timeExpire = System.Threading.Monitor.Wait(initial_members.SyncRoot, TimeSpan.FromMilliseconds(time_to_wait));
                    }
                    catch (System.Exception e)
                    {
                        Stack.NCacheLog.Error("TCPPing.down(FIND_INITIAL_MBRS)", e.ToString());
                    }
                    // Recompute the remaining budget so wakeups before the deadline don't stretch the total wait.
                    time_to_wait = timeout - ((System.DateTime.Now.Ticks - 621355968000000000) / 10000 - start_time);
                }
                mbrDiscoveryInProcess = false;
            }

            if (Stack.NCacheLog.IsInfoEnabled)
            {
                Stack.NCacheLog.Info("TcpPing.down()", "[FIND_INITIAL_MBRS] initial members are " + Global.CollectionToString(initial_members));
            }
            if (Stack.NCacheLog.IsInfoEnabled)
            {
                Stack.NCacheLog.Info("TcpPing.down()", "[FIND_INITIAL_MBRS] initial members count " + initial_members.Count);
            }

            //remove those which are not functional due to twoPhaseConnect
            // (iterate backwards so RemoveAt doesn't shift unvisited indices)
            for (int i = initial_members.Count - 1; i >= 0; i--)
            {
                PingRsp rsp = initial_members[i] as PingRsp;
                if (!rsp.IsStarted)
                {
                    initial_members.RemoveAt(i);
                }
            }

            // 3. Send response
            passUp(new Event(Event.FIND_INITIAL_MBRS_OK, initial_members));
            break;

        case Event.TMP_VIEW:
        case Event.VIEW_CHANGE:
            // Mirror the new view's membership into our local list before forwarding.
            System.Collections.ArrayList tmp;
            if ((tmp = ((View)evt.Arg).Members) != null)
            {
                lock (members.SyncRoot)
                {
                    members.Clear();
                    members.AddRange(tmp);
                }
            }
            passDown(evt);
            break;

        /****************************After removal of NackAck *********************************/
        //TCPPING emulates a GET_DIGEST call, which is required by GMS. This is needed
        //since we have now removed NAKACK from the stack!
        case Event.GET_DIGEST:
            // Answer directly with a digest containing every current member at seqno 0.
            pbcast.Digest digest = new pbcast.Digest(members.Count);
            for (int i = 0; i < members.Count; i++)
            {
                Address sender = (Address)members[i];
                digest.add(sender, 0, 0);
            }
            passUp(new Event(Event.GET_DIGEST_OK, digest));
            return;

        case Event.SET_DIGEST:
            // Not needed! Just here to let you know that it is needed by GMS!
            return;
        /********************************************************************************/

        case Event.BECOME_SERVER: // called after client has joined and is fully working group member
            if (Stack.NCacheLog.IsInfoEnabled)
            {
                Stack.NCacheLog.Info("TcpPing.down()", "received BECOME_SERVER event");
            }
            passDown(evt);
            is_server = true;
            break;

        case Event.CONNECT:
            // Arg layout: [0] group address, [1] sub-group address, [3] two-phase-connect flag.
            object[] addrs = ((object[])evt.Arg);
            group_addr = (string)addrs[0];
            subGroup_addr = (string)addrs[1];
            twoPhaseConnect = (bool)addrs[3];
            // timeout feeds the discovery wait loop above; two-phase connect uses a short 1s window.
            if (twoPhaseConnect)
            {
                timeout = 1000;
            }
            passDown(evt);
            break;

        case Event.DISCONNECT:
            passDown(evt);
            break;

        case Event.HAS_STARTED:
            hasStarted = true;
            passDown(evt);
            break;

        default:
            passDown(evt); // Pass on to the layer below us
            break;
    }
}
/// <summary>
/// Folds another digest into this one. For every sender recorded in <paramref name="d"/>,
/// the per-sender merge() overload is invoked with that sender's low/high/high-seen seqnos.
/// This digest must have enough space for the other digest's senders; otherwise an error
/// message will be written by the per-sender merge.
/// </summary>
public void merge(Digest d)
{
    // Nothing to fold in.
    if (d == null)
    {
        return;
    }
    for (int idx = 0; idx < d.size(); idx++)
    {
        merge(d.senderAt(idx), d.lowSeqnoAt(idx), d.highSeqnoAt(idx), d.highSeqnoSeenAt(idx));
    }
}
/// <summary>
/// Returns a deep copy of this digest: the senders array and all three seqno arrays
/// are duplicated so the copy can be mutated independently.
/// </summary>
/// <returns>A new Digest with copies of senders, low_seqnos, high_seqnos and high_seqnos_seen.</returns>
public Digest copy()
{
    // BUG FIX: the original evaluated senders.Length in the constructor call *before*
    // its own null check, throwing NullReferenceException when senders was null.
    Digest ret = new Digest(senders == null ? 0 : senders.Length);

    // Array.Copy instead of Clone() due to a JDK bug (didn't work under JDK 1.4.{1,2}
    // under Linux, JGroups bug #791718).
    if (senders != null)
        Array.Copy(senders, 0, ret.senders, 0, senders.Length);

    ret.low_seqnos = new long[low_seqnos.Length];
    low_seqnos.CopyTo(ret.low_seqnos, 0);
    ret.high_seqnos = new long[high_seqnos.Length];
    high_seqnos.CopyTo(ret.high_seqnos, 0);
    ret.high_seqnos_seen = new long[high_seqnos_seen.Length];
    high_seqnos_seen.CopyTo(ret.high_seqnos_seen, 0);
    return ret;
}
/// <summary>
/// Merges the digests carried by all MergeData entries into a single digest.
/// For each sender the merged value is min(low_seqno), max(high_seqno), max(high_seqno_seen)
/// (the semantics of Digest.merge). Entries with a null digest are logged and skipped.
/// </summary>
internal virtual Digest consolidateDigests(System.Collections.ArrayList v, int num_mbrs)
{
    Digest merged = new Digest(num_mbrs);
    foreach (object item in v)
    {
        MergeData entry = (MergeData)item;
        Digest memberDigest = entry.Digest;
        if (memberDigest == null)
        {
            gms.Stack.NCacheLog.Error("tmp_digest == null; skipping");
            continue;
        }
        merged.merge(memberDigest);
    }
    return merged;
}
/// <summary>
/// Does nothing with the view itself: while this node is still a client, every incoming
/// view is discarded (only logged). A VIEW_CHANGE_OK is still sent down so the sender's
/// acknowledgement bookkeeping completes.
/// </summary>
public override void handleViewChange(View new_view, Digest digest)
{
    lock (this)
    {
        string discarded = Global.CollectionToString(new_view.Members);
        gms.Stack.NCacheLog.Debug("pb.ClientGmsImpl.handleViewChange()", "view " + discarded + " is discarded as we are not a participant");
    }
    gms.passDown(new Event(Event.VIEW_CHANGE_OK, new object(), Priority.Critical));
}
/// <summary> Joins this process to a group. Determines the coordinator and sends a unicast
/// handleJoin() message to it. The coordinator returns a JoinRsp and then broadcasts the new view, which
/// contains a message digest and the current membership (including the joiner). The joiner is then
/// supposed to install the new view and the digest and starts accepting mcast messages. Previous
/// mcast messages were discarded (this is done in PBCAST).<p>
/// If successful, impl is changed to an instance of ParticipantGmsImpl.
/// Otherwise, we continue trying to send join() messages to the coordinator,
/// until we succeed (or there is no member in the group. In this case, we create our own singleton group).
/// <p>When GMS.disable_initial_coord is set to true, then we won't become coordinator on receiving an initial
/// membership of 0, but instead will retry (forever) until we get an initial membership of > 0.
/// </summary>
/// <param name="mbr">Our own address (assigned through SET_LOCAL_ADDRESS)
/// </param>
public override void join(Address mbr)
{
    Address coord = null;
    Address last_tried_coord = null;
    JoinRsp rsp = null;
    Digest tmp_digest = null;
    leaving = false;
    int join_retries = 1;
    join_promise.Reset();

    while (!leaving)
    {
        // Discover the current membership; an empty result means we may be first.
        findInitialMembers();
        gms.Stack.NCacheLog.Debug("pb.ClientGmsImpl.join()", "initial_mbrs are " + Global.CollectionToString(initial_mbrs));
        if (initial_mbrs.Count == 0)
        {
            if (gms.disable_initial_coord)
            {
                gms.Stack.NCacheLog.Debug("pb.ClientGmsImpl.join()", "received an initial membership of 0, but cannot become coordinator (disable_initial_coord=" + gms.disable_initial_coord + "), will retry fetching the initial membership");
                continue;
            }
            gms.Stack.NCacheLog.CriticalInfo("ClientGmsImpl.Join", "no initial members discovered: creating group as first member.");
            becomeSingletonMember(mbr);
            return;
        }

        coord = determineCoord(initial_mbrs);
        if (coord == null)
        {
            gms.Stack.NCacheLog.Error("pb.ClientGmsImpl.join()", "could not determine coordinator from responses " + Global.CollectionToString(initial_mbrs));
            continue;
        }

        // Anomaly: other members exist, yet discovery elected *us* coordinator.
        // Tell the others the supposed coordinator is dead, then pause and retry.
        if (coord.CompareTo(gms.local_addr) == 0)
        {
            gms.Stack.NCacheLog.Error("pb.ClientGmsImpl.join()", "coordinator anomaly. More members exist yet i am the coordinator " + Global.CollectionToString(initial_mbrs));
            ArrayList members = new ArrayList();
            for (int i = 0; i < initial_mbrs.Count; i++)
            {
                PingRsp ping_rsp = (PingRsp)initial_mbrs[i];
                if (ping_rsp.OwnAddress != null && gms.local_addr != null && !ping_rsp.OwnAddress.Equals(gms.local_addr))
                {
                    members.Add(ping_rsp.OwnAddress);
                }
            }
            gms.InformOthersAboutCoordinatorDeath(members, coord);
            // Count consecutive attempts against the same coordinator; reset when it changes.
            if (last_tried_coord == null)
            {
                last_tried_coord = coord;
            }
            else
            {
                if (last_tried_coord.Equals(coord))
                {
                    join_retries++;
                }
                else
                {
                    last_tried_coord = coord;
                    join_retries = 1;
                }
            }
            Util.Util.sleep(gms.join_timeout);
            continue;
        }

        try
        {
            gms.Stack.NCacheLog.Debug("pb.ClientGmsImpl.join()", "sending handleJoin(" + mbr + ") to " + coord);
            // Same retry bookkeeping as above: retries accumulate only against the same coordinator.
            if (last_tried_coord == null)
            {
                last_tried_coord = coord;
            }
            else
            {
                if (last_tried_coord.Equals(coord))
                {
                    join_retries++;
                }
                else
                {
                    last_tried_coord = coord;
                    join_retries = 1;
                }
            }
            sendJoinMessage(coord, mbr, gms.subGroup_addr);
            rsp = (JoinRsp)join_promise.WaitResult(gms.join_timeout);
            gms._doReDiscovery = false; //block the re-discovery of members as we have found initial members

            if (rsp == null)
            {
                // No JoinRsp within join_timeout.
                if (join_retries >= gms.join_retry_count)
                {
                    gms.Stack.NCacheLog.Error("ClientGmsImpl.Join", "received no joining response after " + join_retries + " tries, so becoming a singlton member");
                    becomeSingletonMember(mbr);
                    return;
                }
                else
                {
                    //I did not receive join response, so there is a chance that coordinator is down
                    //Lets verifiy it.
                    if (gms.VerifySuspect(coord, false))
                    {
                        if (gms.Stack.NCacheLog.IsErrorEnabled)
                        {
                            gms.Stack.NCacheLog.CriticalInfo("ClientGmsImpl.Join()", "selected coordinator " + coord + " seems down; Lets inform others");
                        }
                        //Coordinator is not alive;Lets inform the others
                        ArrayList members = new ArrayList();
                        for (int i = 0; i < initial_mbrs.Count; i++)
                        {
                            PingRsp ping_rsp = (PingRsp)initial_mbrs[i];
                            if (ping_rsp.OwnAddress != null && gms.local_addr != null && !ping_rsp.OwnAddress.Equals(gms.local_addr))
                            {
                                members.Add(ping_rsp.OwnAddress);
                            }
                        }
                        gms.InformOthersAboutCoordinatorDeath(members, coord);
                    }
                }
                gms.Stack.NCacheLog.Error("ClientGmsImpl.Join()", "handleJoin(" + mbr + ") failed, retrying; coordinator:" + coord + " ;No of retries : " + (join_retries + 1));
            }
            else
            {
                if (rsp.JoinResult == JoinResult.Rejected)
                {
                    gms.Stack.NCacheLog.Error("ClientGmsImpl.Join", "joining request rejected by coordinator");
                    becomeSingletonMember(mbr);
                    return;
                }
                if (rsp.JoinResult == JoinResult.MembershipChangeAlreadyInProgress)
                {
                    gms.Stack.NCacheLog.CriticalInfo("Coord.CheckOwnClusterHealth", "Reply: JoinResult.MembershipChangeAlreadyInProgress");
                    Util.Util.sleep(gms.join_timeout);
                    continue;
                }
                gms.Stack.NCacheLog.Debug("pb.ClientGmsImpl.join()", "Join successfull");

                // 1. Install digest
                tmp_digest = rsp.Digest;
                if (tmp_digest != null)
                {
                    tmp_digest.incrementHighSeqno(coord); // see DESIGN for an explanantion
                    gms.Stack.NCacheLog.Debug("pb.ClientGmsImpl.join()", "digest is " + tmp_digest);
                    gms.Digest = tmp_digest;
                }
                else
                {
                    gms.Stack.NCacheLog.Error("pb.ClientGmsImpl.join()", "digest of JOIN response is null");
                }

                // 2. Install view
                gms.Stack.NCacheLog.Debug("pb.ClientGmsImpl.join()", "[" + gms.local_addr + "]: JoinRsp=" + rsp.View + " [size=" + rsp.View.size() + "]\n\n");
                if (rsp.View != null)
                {
                    if (!installView(rsp.View))
                    {
                        gms.Stack.NCacheLog.Error("pb.ClientGmsImpl.join()", "view installation failed, retrying to join group");
                        continue;
                    }
                    gms.Stack.IsOperational = true;
                    return; // joined successfully
                }
                else
                {
                    gms.Stack.NCacheLog.Error("pb.ClientGmsImpl.join()", "view of JOIN response is null");
                }
            }
        }
        catch (System.Exception e)
        {
            gms.Stack.NCacheLog.Error("ClientGmsImpl.join()", "Message: " + e.Message + " StackTrace: " + e.StackTrace + ", retrying");
        }
        Util.Util.sleep(gms.join_retry_timeout);
    }
}
/// <summary>
/// Installs <paramref name="new_view"/>, sending a VIEW_CHANGE up and down the stack.
/// For a MergeView (subclass of View) the caller supplies a non-null digest, which is
/// merged in (MERGE_DIGEST event) before the view itself is installed.
/// </summary>
public virtual void installView(View new_view, Digest digest)
{
    if (digest != null)
    {
        mergeDigest(digest);
    }
    installView(new_view);
}
/// <summary>Sends a MERGE_DIGEST event carrying <paramref name="d"/> down the stack.</summary>
public virtual void mergeDigest(Digest d)
{
    Event mergeEvent = new Event(Event.MERGE_DIGEST, d);
    passDown(mergeEvent);
}
/// <summary>
/// Multicasts <paramref name="new_view"/> (with optional merge <paramref name="digest"/>)
/// to all members of the view as a GMS.HDR.VIEW message, then waits for acknowledgements
/// from every member unless this node is itself leaving the view. One retry round is
/// attempted if not all acknowledgements arrive within _castViewChangeTimeOut.
/// </summary>
/// <param name="new_view">The view to broadcast; must not be null — every step below uses it.</param>
/// <param name="digest">Digest to attach to the view header (non-null for merge views).</param>
public virtual void castViewChange(View new_view, Digest digest)
{
    // BUG FIX: the original dereferenced new_view (ToString + size()) in the first Debug
    // call *before* its "if (new_view != null)" check, so the check could never help.
    // Fail fast with a meaningful exception instead.
    if (new_view == null)
        throw new System.ArgumentNullException("new_view");

    Stack.NCacheLog.Debug("pb.GMS.castViewChange()", "mcasting view {" + new_view + "} (" + new_view.size() + " mbrs)\n");
    new_view.BridgeSourceCacheId = impl.UniqueId;

    Message view_change_msg = new Message(); // bcast to all members
    HDR hdr = new HDR(HDR.VIEW, new_view);
    hdr.digest = digest;
    view_change_msg.putHeader(HeaderType.GMS, hdr);
    view_change_msg.Dests = new_view.Members.Clone() as ArrayList;

    // FIX: 'stack.NCacheLog' -> 'Stack.NCacheLog', consistent with every other logger
    // access in this method.
    if (Stack.NCacheLog.IsInfoEnabled)
        Stack.NCacheLog.Info("CastView.Watch", "Count of members: " + new_view.Members.Count.ToString());

    //TODO: we need to handle scenario when we dont recive castView change from a node
    _promise = new ViewPromise(new_view.Members.Count);

    bool waitForViewAcknowledgement = true;
    if (!new_view.containsMember(local_addr)) //i am leaving
    {
        // Leaving coordinator: still broadcast the view, but don't wait for acks.
        waitForViewAcknowledgement = false;
        if (Stack.NCacheLog.IsInfoEnabled) Stack.NCacheLog.Info("GMS.castViewChange()", "I am coordinator and i am leaving");
    }
    // Both branches of the original sent the identical message; merged into one send.
    passDown(new Event(Event.MSG, view_change_msg, Priority.Critical));

    if (waitForViewAcknowledgement)
    {
        _promise.WaitResult(_castViewChangeTimeOut);
        if (!_promise.AllResultsReceived()) //retry once
        {
            view_change_msg.Dests = new_view.Members.Clone() as ArrayList;
            passDown(new Event(Event.MSG, view_change_msg, Priority.Critical));
            _promise.WaitResult(_castViewChangeTimeOut);
        }
        if (_promise.AllResultsReceived())
        {
            Stack.NCacheLog.CriticalInfo("GMS.castViewChange()", "View applied");
        }
    }
}
/// <summary>
/// Reconstructs this header's fields from the compact stream.
/// NOTE(review): the read sequence below presumably mirrors the corresponding
/// Serialize write order — confirm against Serialize before reordering anything;
/// compact-stream reads are positional.
/// </summary>
void ICompactSerializable.Deserialize(CompactReader reader)
{
    type = reader.ReadByte();
    view = View.ReadView(reader);
    mbr = Address.ReadAddress(reader);
    join_rsp = (JoinRsp)reader.ReadObject();
    digest = (Digest)reader.ReadObject();
    merge_id = reader.ReadObject();
    merge_rejected = reader.ReadBoolean();
    subGroup_name = reader.ReadString();
    nodeList = reader.ReadObject() as ArrayList;
    arg = reader.ReadObject();
    isStartedAsMirror = reader.ReadBoolean();
    gms_id = reader.ReadObject() as string;
}
/// <summary>
/// Overridden to avoid hanging on getDigest(): the coordinator's JOIN handler sends down a
/// GET_DIGEST event and blocks on digest_mutex; the GET_DIGEST_OK reply cannot travel the
/// normal event path because the JOIN thread is the one that would process it. This method
/// runs on the lower protocol's up-handler thread, so it stores the digest, wakes every
/// waiter on digest_mutex, and swallows the event (it must not be processed twice). All
/// other events take the normal path.
/// </summary>
public override void receiveUpEvent(Event evt)
{
    if (evt.Type != Event.GET_DIGEST_OK)
    {
        base.receiveUpEvent(evt);
        return;
    }
    lock (digest_mutex)
    {
        digest = (Digest)evt.Arg;
        System.Threading.Monitor.PulseAll(digest_mutex);
    }
}
/// <summary> Computes the new view (including the newly joined member) and get the digest from PBCAST.
/// Returns both in the form of a JoinRsp. Returns null when the joiner is this node itself
/// or when no digest is available; an already-present member gets the existing view/digest back.
/// </summary>
public override JoinRsp handleJoin(Address mbr, string subGroup_name, bool isStartedAsMirror, string gmsId, ref bool acquireHashmap)
{
    lock (this)
    {
        System.Collections.ArrayList new_mbrs = System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(1));
        View v = null;
        Digest d, tmp;
        gms.Stack.NCacheLog.CriticalInfo("CoordGmsImpl.handleJoin", "mbr=" + mbr);

        // A node cannot join itself.
        if (gms.local_addr.Equals(mbr))
        {
            gms.Stack.NCacheLog.Error("CoordGmsImpl.handleJoin", "cannot join myself !");
            return null;
        }
        // Re-join of an existing member: hand back the current view and digest unchanged.
        if (gms.members.contains(mbr))
        {
            gms.Stack.NCacheLog.Error("CoordGmsImpl.handleJoin()", "member " + mbr + " already present; returning existing view " + Global.CollectionToString(gms.members.Members));
            acquireHashmap = false;
            View view = new View(gms.view_id, gms.members.Members);
            view.CoordinatorGmsId = gms.unique_id;
            JoinRsp rsp = new JoinRsp(view, gms.Digest);
            rsp.View.SequencerTbl = gms._subGroupMbrsMap;
            rsp.View.MbrsSubgroupMap = gms._mbrSubGroupMap;
            return rsp; // already joined: return current digest and membership
        }
        new_mbrs.Add(mbr);

        //=====================================
        // update the subGroupMbrsMap and mbrSubGroupMap
        if (gms._subGroupMbrsMap.Contains(subGroup_name))
        {
            lock (gms._subGroupMbrsMap.SyncRoot)
            {
                System.Collections.ArrayList groupMbrs = (System.Collections.ArrayList)gms._subGroupMbrsMap[subGroup_name];
                if (!groupMbrs.Contains(mbr))
                    groupMbrs.Add(mbr);
            }
        }
        else
        {
            lock (gms._subGroupMbrsMap.SyncRoot)
            {
                System.Collections.ArrayList groupMbrs = new System.Collections.ArrayList();
                groupMbrs.Add(mbr);
                gms._subGroupMbrsMap[subGroup_name] = groupMbrs;
            }
        }
        if (!gms._mbrSubGroupMap.Contains(mbr))
        {
            lock (gms._mbrSubGroupMap.SyncRoot)
            {
                gms._mbrSubGroupMap[mbr] = subGroup_name;
            }
        }
        //=====================================

        tmp = gms.Digest; // get existing digest
        if (tmp == null)
        {
            gms.Stack.NCacheLog.Error("CoordGmsImpl.handleJoin", "received null digest from GET_DIGEST: will cause JOIN to fail");
            return null;
        }
        gms.Stack.NCacheLog.Debug("got digest=" + tmp);

        d = new Digest(tmp.size() + 1); // create a new digest, which contains 1 more member
        d.add(tmp); // add the existing digest to the new one
        d.add(mbr, 0, 0); // ... and add the new member. it's first seqno will be 1

        v = gms.getNextView(new_mbrs, null, null);
        v.SequencerTbl = gms._subGroupMbrsMap;
        v.MbrsSubgroupMap = gms._mbrSubGroupMap;
        v.AddGmsId(mbr, gmsId);
        //add coordinator own's gms id[bug fix]; so that new member could know cordinator id
        v.AddGmsId(gms.local_addr, gms.unique_id);
        if (gms.GmsIds != null)
        {
            // Copy all known gms ids into the view (clone so enumeration is safe).
            Hashtable gmsIds = gms.GmsIds.Clone() as Hashtable;
            IDictionaryEnumerator ide = gmsIds.GetEnumerator();
            while (ide.MoveNext())
            {
                v.AddGmsId((Address)ide.Key,(string) ide.Value);
            }
        }
        gms.Stack.NCacheLog.Debug("joined member " + mbr + ", view is " + v);
        return new JoinRsp(v, d);
    }
}
/// <summary>
/// Reacts to a view received by the GMS. Concrete role implementations decide what to do:
/// the visible overrides either discard the view (client) or install it (participant/coordinator).
/// </summary>
/// <param name="new_view">The view to be handled.</param>
/// <param name="digest">Non-null only for merge views; carries the per-member seqno digest.</param>
public abstract void handleViewChange(View new_view, Digest digest);
/// <summary> Called by the GMS when a VIEW is received.</summary>
/// <param name="new_view">The view to be installed
/// </param>
/// <param name="digest"> If view is a MergeView, digest contains the seqno digest of all members and has to
/// be set by GMS
/// </param>
public override void handleViewChange(View new_view, Digest digest)
{
    System.Collections.ArrayList mbrs = new_view.Members;
    if (digest != null)
    {
        gms.Stack.NCacheLog.Debug("view=" + new_view + ", digest=" + digest);
    }
    else
    {
        gms.Stack.NCacheLog.Debug("view=" + new_view);
    }
    // While leaving, a view that no longer contains us is ignored.
    if (leaving && !mbrs.Contains(gms.local_addr))
        return;
    gms.installView(new_view, digest);

    lock (viewRejectingMembers.SyncRoot)
    {
        //we handle the request of those nodes who have rejected our this view
        Address rejectingMbr;
        for (int i = 0; i < viewRejectingMembers.Count; i++)
        {
            rejectingMbr = viewRejectingMembers[i] as Address;
            handleViewRejected(rejectingMbr);
        }
        viewRejectingMembers.Clear();
    }
}
/// <summary>Creates a join response carrying the view and digest for the joiner to install.</summary>
public JoinRsp(View v, Digest d)
{
    this.view = v;
    this.digest = d;
}
/// <summary>
/// Replies to a merge request: wraps the given view and digest (plus the current merge_id)
/// in a MERGE_RSP header and unicasts it back to <paramref name="sender"/>.
/// </summary>
internal virtual void sendMergeResponse(Address sender, View view, Digest digest)
{
    GMS.HDR hdr = new GMS.HDR(GMS.HDR.MERGE_RSP);
    hdr.merge_id = merge_id;
    hdr.view = view;
    hdr.digest = digest;

    Message msg = new Message(sender, null, null);
    msg.putHeader(HeaderType.GMS, hdr);
    gms.Stack.NCacheLog.Debug("response=" + hdr);
    gms.passDown(new Event(Event.MSG, msg));
}
/// <summary>Creates a join response with an explicit outcome (e.g. Rejected) alongside the view and digest.</summary>
public JoinRsp(View v, Digest d, JoinResult result)
{
    this.view = v;
    this.digest = d;
    this.joinResult = result;
}
/// <summary>
/// Bootstraps this node as the first (and only) member of a new group: seeds the GMS digest
/// with itself, builds a singleton view (including sub-group bookkeeping and its own gms id),
/// installs it, becomes coordinator and marks the stack operational.
/// </summary>
internal virtual void becomeSingletonMember(Address mbr)
{
    Digest initial_digest;
    ViewId view_id = null;
    ArrayList mbrs = ArrayList.Synchronized(new ArrayList(1));

    // set the initial digest (since I'm the first member)
    initial_digest = new Digest(1); // 1 member (it's only me)
    initial_digest.add(gms.local_addr, 0, 0); // initial seqno mcast by me will be 1 (highest seen +1)
    gms.Digest = initial_digest;

    view_id = new ViewId(mbr); // create singleton view with mbr as only member
    mbrs.Add(mbr);

    View v = new View(view_id, mbrs);
    v.CoordinatorGmsId = gms.unique_id;

    // Register myself in the sub-group tables before the view is published; the view gets
    // clones so later table changes don't leak into it.
    ArrayList subgroupMbrs = new ArrayList();
    subgroupMbrs.Add(mbr);
    gms._subGroupMbrsMap[gms.subGroup_addr] = subgroupMbrs;
    gms._mbrSubGroupMap[mbr] = gms.subGroup_addr;
    v.SequencerTbl = gms._subGroupMbrsMap.Clone() as Hashtable;
    v.MbrsSubgroupMap = gms._mbrSubGroupMap.Clone() as Hashtable;
    v.AddGmsId(mbr, gms.unique_id);

    gms.installView(v);
    gms.becomeCoordinator(); // not really necessary - installView() should do it

    gms.Stack.IsOperational = true;
    gms.Stack.NCacheLog.Debug("pb.ClientGmsImpl.becomeSingletonMember()", "created group (first member). My view is " + gms.view_id + ", impl is " + gms.Impl.GetType().FullName);
}
/// <summary>
/// Reads view, digest and join result from the compact stream.
/// NOTE(review): read order presumably mirrors the corresponding Serialize — confirm
/// before reordering; compact-stream reads are positional.
/// </summary>
public void Deserialize(CompactReader reader)
{
    view = reader.ReadObject() as View;
    digest = reader.ReadObject() as Digest;
    joinResult = (JoinResult)reader.ReadObject();
}
/// <summary>
/// Compares the sender lists of two digests position by position and returns true only
/// when they are identical (two nulls at the same position count as equal). Returns false
/// when the other digest is null, when either sender array is null, or when lengths differ.
/// </summary>
/// <param name="other">The digest whose senders are compared against ours.</param>
/// <returns>true when both digests contain exactly the same senders in the same order.</returns>
public bool sameSenders(Digest other)
{
    if (other == null || this.senders == null || other.senders == null)
        return false;
    if (this.senders.Length != other.senders.Length)
        return false;

    for (int i = 0; i < this.senders.Length; i++)
    {
        Address mine = this.senders[i];
        Address theirs = other.senders[i];
        // Two nulls at the same slot match; otherwise both must be non-null and equal.
        if (mine == null && theirs == null)
            continue;
        if (mine == null || theirs == null || !mine.Equals(theirs))
            return false;
    }
    return true;
}
/// <summary>
/// Appends every entry of <paramref name="d"/> to this digest via the per-sender add()
/// overload. A null digest is ignored.
/// </summary>
public void add(Digest d)
{
    if (d == null)
    {
        return;
    }
    // d is not modified by the per-sender add(), so its size can be read once.
    int count = d.size();
    for (int i = 0; i < count; i++)
    {
        add(d.senderAt(i), d.lowSeqnoAt(i), d.highSeqnoAt(i), d.highSeqnoSeenAt(i));
    }
}
/// <summary> If we are leaving, we have to wait for the view change (last msg in the current view) that
/// excludes us before we can leave.
/// </summary>
/// <param name="new_view">The view to be installed
/// </param>
/// <param name="digest"> If view is a MergeView, digest contains the seqno digest of all members and has to
/// be set by GMS
/// </param>
public override void handleViewChange(View new_view, Digest digest)
{
    if (gms.Stack.NCacheLog.IsInfoEnabled)
        gms.Stack.NCacheLog.Info("ParticipentGMSImpl.handleViewChange", "received view");
    System.Collections.ArrayList mbrs = new_view.Members;
    gms.Stack.NCacheLog.Debug("view=");
    // A new view supersedes any pending suspicions.
    suspected_mbrs.Clear();
    if (leaving && !mbrs.Contains(gms.local_addr))
    {
        // received a view in which I'm not member: ignore
        return;
    }

    ViewId vid = gms.view_id != null ? gms.view_id.Copy() : null;
    if (vid != null)
    {
        // A strictly newer incoming view means we are past our first view: no longer a new member.
        int rc = vid.CompareTo(new_view.Vid);
        if (rc < 0)
        {
            isNewMember = false;
            if (gms.Stack.NCacheLog.IsInfoEnabled)
                gms.Stack.NCacheLog.Info("ParticipantGmsImp", "isNewMember : " + isNewMember);
        }
    }
    gms.installView(new_view, digest);
}
/// <summary>
/// Reads a merge response: sender and the rejected flag always, then view and digest only
/// when the merge was not rejected (the writer presumably skips them for rejections —
/// confirm against the corresponding Serialize).
/// </summary>
void ICompactSerializable.Deserialize(CompactReader reader)
{
    sender = (Address)reader.ReadObject();
    merge_rejected = reader.ReadBoolean();
    if (!merge_rejected)
    {
        view = (View)reader.ReadObject();
        digest = (Digest)reader.ReadObject();
    }
}