/// <summary>
/// Enqueues a message, rejecting duplicates by key. Messages without an
/// explicit key are keyed by their payload.
/// </summary>
/// <returns>The enqueued message, or null when <paramref name="m"/> was null or a duplicate.</returns>
myMessage MessageQueueI.Add(myMessage m) {
    if (m == null) { return null; }
    if (m.key == null) { m.key = m.data; }
    // NOTE: lock(this) is intentionally kept — the other MessageQueueI methods
    // synchronize on the same object, so mutual exclusion must stay on 'this'.
    lock (this) {
        if (this.msgBymsgKey.ContainsKey(m.key)) {
            Program.pe("duplicate detected and discarded.");
            return null;
        }
        // TODO: duplicates should never occur during normal execution, but
        // consider a collection that supports duplicates if that ever changes.
        this.msgBymsgKey.Add(m.key, m);
        this.queue.Add(m);
    }
    return m;
}
/// <summary>
/// Orders messages by arrivalTime, then key, then data. A non-null key sorts
/// before a null key; null data sorts before non-null data.
/// </summary>
/// <returns>Negative/zero/positive per IComparable; -1 (logged) for a foreign type.</returns>
public int CompareTo(object obj0) {
    // BUG FIX: the original dereferenced obj0.GetType() without a null check.
    // Per the IComparable contract, any instance compares greater than null.
    if (obj0 == null) { return 1; }
    if (!(obj0 is myMessage)) {
        Program.pe("Invalid comparison: myMessage with " + (obj0.GetType()));
        return -1;
    }
    myMessage obj = (myMessage)obj0;
    // Comparison order: arrivalTime, key, data.
    int ret = arrivalTime.CompareTo(obj.arrivalTime);
    if (ret == 0) {
        if (this.key != null) {
            ret = obj.key != null ? this.key.CompareTo(obj.key) : -1;
        } else {
            ret = obj.key == null ? 0 : 1;
        }
    }
    if (ret == 0) {
        if (this.data == null) {
            return obj.data == null ? 0 : -1;
        }
        ret = obj.data == null ? 1 : this.data.CompareTo(obj.data);
    }
    return ret;
}
/// <summary>
/// Serializes the message and broadcasts it to all slaves on the slave UDP endpoint.
/// </summary>
public static void sendToAll(myMessage m) {
    byte[] payload = m.toByteArray();
    senderToSlave.Send(payload, payload.Length, Slave.broadcastToSlaveEP);
}
/// <summary>
/// Post-send callback: logs per-partition Kafka errors from <paramref name="reply"/>,
/// then notifies success — one confirm message per sent message, or a single
/// batch confirm keyed on the last message — and optionally logs the sent batch.
/// </summary>
private static void operationComplete(ICollection <myMessage> completed, myMessage last, List <ProduceResponse> reply) {
    foreach (ProduceResponse response in reply) {
        if (response.Error != 0) {
            Program.pe("ErrNo " + response.Error + " for msg n° " + response.Offset + " of partition " + response.PartitionId + " in topic " + response.Topic);
        }
    }
    if (Program.args.slaveNotifyMode_Batch == 0) {
        foreach (myMessage mymsg in completed) {
            new myMessage(MessageType.confirmMessageSuccess_Single, mymsg.key).launchToOutput();
        }
    } else if (last != null) {
        // Guard: in batch mode 'last' is null when 'completed' was empty;
        // the original would have thrown NullReferenceException on last.key.
        new myMessage(MessageType.confirmMessageSuccess_Batch, last.key).launchToOutput();
    }
    // Messages not logged on receive are logged here, once their send completed.
    if (!Program.args.logToolMsgOnReceive) {
        int i = 1;
        foreach (myMessage mymsg in completed) {
            Program.LogToolMsg("sentBatch[" + (i++) + "/" + completed.Count + "]: " + mymsg.ToPrintString());
        }
    }
}
/// <summary>
/// Returns (and optionally removes) every queued message older than the given
/// message's arrival time, minus the thread-delay safety margin.
/// </summary>
ICollection <myMessage> MessageQueueI.getOlderThan(myMessage msg, bool remove) {
    if (msg == null) {
        return new List <myMessage>(0);
    }
    DateTime threshold = msg.arrivalTime - MessageQueue.sicurezzaPerDelayDeiThread;
    return ((MessageQueueI)this).getOlderThan(threshold, remove);
}
/// <summary>
/// Resolves the message stored under <paramref name="key"/> and delegates to the
/// message-based overload; a null or unknown key yields an empty collection.
/// </summary>
ICollection <myMessage> MessageQueueI.getOlderThan(string key, bool remove) {
    if (key == null) {
        return new List <myMessage>(0);
    }
    lock (this) {
        myMessage found = ((MessageQueueI)this).get(key);
        if (found == null) {
            return new List <myMessage>(0);
        }
        return ((MessageQueueI)this).getOlderThan(found, remove);
    }
}
/// <summary>
/// Returns the message stored under <paramref name="key"/> (null when absent).
/// When <paramref name="remove"/> is true the found message is also removed
/// from both the sorted queue and the key index.
/// </summary>
myMessage MessageQueueI.get(string key, bool remove) {
    // Guard: Dictionary.TryGetValue throws ArgumentNullException on a null key.
    if (key == null) { return null; }
    myMessage ret = null;
    lock (this) {
        this.msgBymsgKey.TryGetValue(key, out ret);
        if (remove && ret != null) {
            this.queue.Remove(ret);
            this.msgBymsgKey.Remove(key);
        }
    }
    return ret;
}
/// <summary>
/// Returns every queued message with arrivalTime up to <paramref name="t"/>,
/// optionally removing them. The result is a detached snapshot, safe to iterate
/// outside the queue's lock.
/// </summary>
ICollection <myMessage> MessageQueueI.getOlderThan(DateTime t, bool remove) {
    lock (this) {
        // Sentinel bounds: key/data left null, CompareTo handles nulls explicitly.
        myMessage bot = new myMessage(), top = new myMessage();
        bot.arrivalTime = new DateTime(0);
        top.arrivalTime = t;
        ICollection <myMessage> view = this.queue.GetViewBetween(bot, top);
        if (view == null) {
            return new List <myMessage>(0);
        }
        // BUG FIX: GetViewBetween returns a LIVE view of the SortedSet; removing
        // elements while enumerating that view throws InvalidOperationException.
        // Copy first, then remove — the copy is also safe to hand to callers
        // (the original TODO asked for exactly this cached copy for the sender).
        List <myMessage> snapshot = new List <myMessage>(view);
        if (remove) {
            foreach (myMessage m in snapshot) {
                ((MessageQueueI)this).get(m.key, true);
            }
        }
        return snapshot;
    }
}
/// <summary>
/// Heuristic run on non-master nodes: true when the master is alive (recent
/// update) yet the oldest pending message has waited longer than the crash
/// timeout — i.e. the master keeps up with heartbeats but not with the load.
/// </summary>
public static bool masterIsOvercharged() {
    if (iAmTheMaster) {
        // A better check is done while doing master's operations; this is redundant for the master.
        return false;
    }
    myMessage oldest = ReceiverTool.messageQueue.getOldest();
    if (oldest == null) {
        // If my pending list is empty, the master is working great.
        return false;
    }
    // Truth table (active = master updated recently, stale = oldest message too old):
    //  active  &  stale  -> master is working but overcharged.
    //  active  & !stale  -> master is working fine.
    //  !active &  stale  -> master is assumed dead (or missed notifying an old
    //                       message while no new messages are incoming).
    //  !active & !stale  -> master is assumed alive (status unknown, no traffic).
    long lastUpdateTicks = Volatile.Read(ref Master.lastMasterUpdate);
    bool masterRecentlyActive = DateTime.Now.Ticks - lastUpdateTicks < Master.crashTimeoutInterval.Ticks;
    if (!masterRecentlyActive) {
        return false;
    }
    return (DateTime.Now - oldest.arrivalTime) > Master.crashTimeoutInterval;
}
/// <summary>
/// Sends a batch of messages to Kafka asynchronously; a dedicated thread waits
/// for the broker reply and fires the completion callback.
/// </summary>
public static void send(ICollection <myMessage> mymsgs) {
    // Hoisted: check emptiness before allocating the Kafka message list.
    if (mymsgs.Count == 0) { return; }
    List <KafkaNet.Protocol.Message> arr = new List <KafkaNet.Protocol.Message>(mymsgs.Count);
    myMessage last = null;
    foreach (myMessage mymsg in mymsgs) {
        arr.Add(new KafkaNet.Protocol.Message(mymsg.data, mymsg.makeKafkaKey()));
        last = mymsg;
    }
    timeout = new TimeSpan(0, 0, 1);
    // TODO: payload could be compressed (MessageCodec).
    Task <List <ProduceResponse> > operationStatus = producer.SendMessageAsync(topicName, arr, 1, timeout, MessageCodec.CodecNone);
    if (operationStatus == null) { return; }
    // The broker ack is awaited on a separate thread so this caller does not
    // block; the completion callback runs only once the reply arrives.
    object argArr = new object[] { mymsgs, last, operationStatus };
    Thread t = new Thread(new ParameterizedThreadStart(WaitOperationComplete0));
    t.Name = "Thread Waiting For Kafka Reply";
    // TODO: one thread per batch has caused System.OutOfMemoryException under load.
    t.Start(argArr);
}
/// <summary>
/// Heuristic run on non-master nodes: true when the master has NOT updated
/// within the crash timeout AND the oldest pending message is older than that
/// timeout — i.e. the master is presumed dead.
/// </summary>
public static bool masterIsCrashedCheck() {
    if (iAmTheMaster) {
        Program.pe("Master should not execute \"masterIsCrashedCheck()\".");
    }
    myMessage oldest = ReceiverTool.messageQueue.getOldest();
    if (oldest == null) {
        // If my pending list is empty, the master is working great.
        return false;
    }
    // Truth table (recent = master updated recently, stale = oldest message too old):
    //  recent  &  stale  -> master is working but overcharged.
    //  recent  & !stale  -> master is working fine.
    //  !recent &  stale  -> master is assumed dead (or missed notifying an old
    //                       message while no new messages are incoming).
    //  !recent & !stale  -> master is assumed alive (status unknown, no traffic).
    long lastUpdateTicks = Volatile.Read(ref Master.lastMasterUpdate);
    bool recentUpdate = DateTime.Now.Ticks - lastUpdateTicks < Master.crashTimeoutInterval.Ticks;
    if (recentUpdate) {
        return false;
    }
    return (DateTime.Now - oldest.arrivalTime) > Master.crashTimeoutInterval;
}
/// <summary>
/// Blocking receive loop for broadcast UDP datagrams: decodes each datagram
/// into a myMessage, routes xml messages to the tool queue and everything else
/// to the slave queue, and reports socket-buffer load. Runs until the thread is
/// aborted; in benchmark mode it also tracks per-message processing times.
/// </summary>
public virtual void receiveBroadcast0(object unused) {
    double averageMessageSize = this is ReceiverTool ? ReceiverTool.averageMessageSize : SlaveReceiver.averageMessageSize;
    myMessage msg;
    byte[] data = null;
    try {
        Program.p(Thread.CurrentThread.Name + " ready");
        bool warningEmitted = false;
        int rcv = 0;
        // Benchmark counters, reset on every loop start.
        this.startTime = DateTime.Now;
        this.accepted = this.discarded = 0;
        this.messageStart = DateTime.MinValue;
        this.minMessageTime = TimeSpan.MaxValue;
        this.maxMessageTime = TimeSpan.MinValue;
        this.totMessageTime = new TimeSpan(0);
        Thread me = Thread.CurrentThread;
        while (true) {
            if (Program.args.benchmark) {
                // Account the time spent processing the previous message.
                if (this.messageStart != DateTime.MinValue) {
                    TimeSpan time = DateTime.Now - this.messageStart;
                    this.totMessageTime += time;
                    if (time > this.maxMessageTime) { this.maxMessageTime = time; }
                    if (time < this.minMessageTime) { this.minMessageTime = time; }
                }
            }
            try {
                // BUG FIX: removed a leftover debug Thread.Sleep(500) that capped the
                // receiver at ~2 datagrams/second and let the socket buffer fill up.
                data = this.udpClient.Receive(ref this.endpoint);
            } catch (SocketException) {
                // Likely the thread is being shut down; retry.
                continue;
            }
            if (Program.args.benchmark) { this.messageStart = DateTime.Now; }
            msg = new myMessage(data);
            if (Program.args.benchmark) {
                if (msg.type == MessageType.xml_NotOFMyPartition) {
                    discarded++;
                    this.messageStart = DateTime.MinValue;
                } else {
                    accepted++;
                }
            }
            if (msg.type == MessageType.xml_NotOFMyPartition) { continue; }
            // BUG FIX: the load ratio used integer division (Available / ushort.MaxValue),
            // which is 0 until the buffer is completely full; compute it as a double.
            double load = (double)this.udpClient.Available / ushort.MaxValue;
            if (0.90 <= load) {
                Program.pe("Questo ricevente è sovraccarico al " + ((int)(10000 * load)) / 100 + "%, la perdita pacchetti è imminente e il programma non può funzionare correttamente in queste condizioni." + Environment.NewLine + "Si prega di ridurre il carico su questo ricevente inserendone altri nella rete e ripartizionando." + Environment.NewLine + "byte pending:" + udpClient.Available + "; pacchetti:" + udpClient.Available / averageMessageSize);
            }
            if (!warningEmitted && this.udpClient.Available >= (ushort.MaxValue - averageMessageSize * 2) / 10) {
                warningEmitted = true;
                Program.p("warning sovraccarico al " + ((int)(10000 * (this.udpClient.Available / (ushort.MaxValue - averageMessageSize)))) / 100 + "%. byte pending:" + udpClient.Available + "; pacchetti:" + udpClient.Available / averageMessageSize);
            }
            if (rcv++ % 20 == 0) {
                Program.p(Thread.CurrentThread.Name + ") receive pending message:" + (this.udpClient.Available / averageMessageSize) + "; pending byte:" + this.udpClient.Available + "; load:" + ((int)(10000 * load)) / 100 + "%");
            }
            if (msg.type == MessageType.xml) {
                ReceiverTool.messageQueue.Add(msg);
                Master.canPublish.Release(1);
                if (Program.args.logToolMsgOnReceive) { Program.LogToolMsg(me.Name + "Rcv: " + msg); }
            } else {
                SlaveReceiver.SlaveMessageQueue.Add(msg);
                SlaveMsgConsumer.canConsume.Release(1);
            }
            // NOTE(review): these wrong-port checks run AFTER the message has already
            // been enqueued above — confirm whether mis-routed messages should be
            // dropped before enqueueing instead.
            if (msg.type == MessageType.xml && (me.Name == null || me.Name[0] == 'I')) {
                Program.pe("Got xml data on the slave receiver (msg was sent on wrong broadacast port)");
                continue;
            }
            if (msg.type != MessageType.xml && (me.Name == null || me.Name[0] == 'T')) {
                Program.pe("Got non-xml data on the tool receiver (msg was sent on wrong broadacast port)");
                continue;
            }
        }
    } catch (ThreadAbortException e) {
        Program.p("Thread receiver (" + this.GetType() + ") aborted; Last data handled:" + (data == null ? "none" : data.arrayToString()), e);
        if (!Program.args.benchmark) { return; }
        string stats = getstatistics();
        MessageBox.Show(stats);
        Program.p(stats);
        return;
    }
}
/// <summary>
/// Processing executed after a message got dequeued: dispatches on the message
/// type and applies the corresponding cluster-state change (argument reload,
/// master change, slave add/remove, slave-list refresh, send confirmations).
/// </summary>
public void consume() {
    Program.logSlave("consuming :" + this.ToPrintString());
    Slave s;
    ulong id;
    int removedCount;
    switch (type) {
    case MessageType.argumentChange:
        StartupArgJson args;
        try {
            args = StartupArgJson.deserialize(this.data);
        } catch (Exception e) {
            Program.pe(this.type + " body is not deserializable: " + this.data, e);
            break;
        }
        if (args == null || !args.Validate()) {
            Program.pe(this.type + " body is deserializable but with invalid content: " + this.data);
            break;
        }
        Program.args = args;
        // Guard: the crash-checker thread may not have been started yet.
        if (Master.MasterCrashChecker != null) { Master.MasterCrashChecker.Abort(); }
        Master.MasterCrashChecker = null;
        new Thread(Program.HotRestart).Start();
        // TODO: also build a tool that emits repartition messages to manage all
        // partitions dynamically (a "supermaster").
        break;
    case MessageType.masterChange:
        // If required in the future trigger MessageType.dinamicallyAddSlave; for now
        // everything works even if the new master was not in the slaves list.
        string[] split = this.data.Split(myMessage.separator);
        if (split.Length < 2 || !ulong.TryParse(split[0], out id)) {
            Program.pe(this.type + " have non-numerical body; expected two numeric id separated by a '" + myMessage.separator + "', found instead: " + this.data);
            break;
        }
        Slave oldMaster = Slave.getFromID(id);
        if (oldMaster == null) { break; }
        // BUG FIX: the second id is split[1]; the original re-parsed split[0],
        // so the "new master" was always resolved to the old master's id.
        if (!ulong.TryParse(split[1], out id)) {
            Program.pe(this.type + " have non-numerical body; expected two numeric id separated by a '" + myMessage.separator + "', found instead: " + this.data);
            break;
        }
        Slave newMaster = Slave.getFromID(id);
        if (newMaster == null) { break; }
        if (Master.currentMaster == oldMaster) {
            Master.changeMaster(newMaster); // master checker msg received
        }
        Slave.Remove(oldMaster);
        break;
    case MessageType.dinamicallyRemoveSlave:
        if (!ulong.TryParse(this.data, out id)) {
            Program.pe(this.type + " have non-numerical body; expected numeric id, found: " + this.data);
            break;
        }
        s = Slave.getFromID(id);
        if (s == null) { break; }
        Slave.Remove(s);
        break;
    case MessageType.dinamicallyAddSlave:
        Slave.deserializeOrGet(this.data);
        if (Master.iAmTheMaster) {
            // The master replies with the full, serialized slave list.
            myMessage m = new myMessage(MessageType.provideSlaveList, "");
            foreach (Slave s2 in Slave.all) {
                m.data += ";" + s2.serialize();
            }
            // Guard: Substring(1) (drops the leading ';') would throw on an empty list.
            if (m.data.Length > 0) { m.data = m.data.Substring(1); }
            m.launchToOutput();
        }
        break;
    case MessageType.provideSlaveList:
        Volatile.Write(ref Master.lastMasterUpdate, DateTime.Now.Ticks);
        string[] jsons = this.data.Split(myMessage.separator);
        lock (Slave.all) {
            foreach (string str in jsons) {
                Slave.deserializeOrGet(str);
            }
        }
        break;
    case MessageType.confirmMessageSuccess_Single:
        Volatile.Write(ref Master.lastMasterUpdate, DateTime.Now.Ticks);
        removedCount = ReceiverTool.messageQueue.get(this.data, true) == null ? 0 : 1;
        Program.logSlave(removedCount + " removed from queue.");
        break;
    case MessageType.confirmMessageSuccess_Batch:
        Volatile.Write(ref Master.lastMasterUpdate, DateTime.Now.Ticks);
        removedCount = ReceiverTool.messageQueue.getOlderThan(this.data, true).Count;
        Program.logSlave(removedCount + " removed from queue.");
        break;
    case MessageType.xml:
        Program.pe("xml messages should be handled in Master thread without consuming.");
        return;
    default:
    case MessageType.uninitialized:
        Program.pe("uninitialized message consumed");
        return;
    }
}