public Thread invoke(object action_id, Action reaction, out IActionFuture future)
{
    int partner_size = channel.RemoteSize;
    int value = ActionDef.action_ids[action_id];

    // Post a non-blocking send and a matching non-blocking receive to every partner.
    MPI.RequestList request_list = new MPI.RequestList();
    for (int i = 0; i < partner_size; i++)
    {
        MPI.Request req = channel.ImmediateSend<object>(value, i, value);
        request_list.Add(req);
    }
    for (int i = 0; i < partner_size; i++)
    {
        MPI.ReceiveRequest req = channel.ImmediateReceive<object>(i, value);
        request_list.Add(req);
    }

    // Complete the handshake on a separate thread; the future lets the caller synchronize later.
    ManualResetEvent sync = new ManualResetEvent(false);
    ActionFuture future_ = new ActionFuture(request_list, sync);
    future = future_;
    Thread t = new Thread(new ThreadStart(() => handle_request(future_, sync, reaction)));
    t.Start();
    return t;
}
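// NOTE: a minimal sketch of the handle_request helper referenced above, which is not
// shown in this listing. It assumes ActionFuture exposes a Wait() that delegates to
// the underlying RequestList.WaitAll(); the intended behavior is to complete the
// exchange, signal the event, and then run the caller's reaction.
private void handle_request(ActionFuture future_, ManualResetEvent sync, Action reaction)
{
    future_.Wait();     // assumption: blocks until all posted sends/receives complete
    sync.Set();         // release anyone blocked on the future's event
    if (reaction != null)
        reaction();     // run the caller-supplied callback once the exchange is done
}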
public void some_method_1(int arg1, int arg2, int arg3, IScatter<int> arg4, IScatter<int> arg5, IScatter<int> arg6)
{
    // Send the arguments to every server, post the receives for the results,
    // and block until both directions complete.
    MPI.RequestList reqList = new MPI.RequestList();
    sendArguments(OPERATION_1, arg1, arg2, arg3, arg4, arg5, arg6, ref reqList);
    receiveResult<int>(ref reqList, OPERATION_1);
    reqList.WaitAll();
}
private void receiveResult<T>(ref MPI.RequestList reqList, int operation_tag)
{
    // Post one non-blocking receive per server for the operation's result.
    int remote_size = channel.RemoteSize;
    for (int server = 0; server < remote_size; server++)
    {
        MPI.Request req = channel.ImmediateReceive<T>(server, operation_tag);
        reqList.Add(req);
    }
}
void returnResult(int[] result, int operation_tag)
{
    // Send each client its slice of the result and wait for all sends to complete.
    MPI.RequestList reqList = new MPI.RequestList();
    int remote_size = channel.RemoteSize;
    for (int client = 0; client < remote_size; client++)
    {
        MPI.Request req = channel.ImmediateSend<int>(result[client], client, operation_tag);
        reqList.Add(req);
    }
    reqList.WaitAll();
}
public IScan<int> some_method_7(int arg1, int arg2, int arg3, IScatter<int> arg4, IScatter<int> arg5, IScatter<int> arg6)
{
    // Same request pattern as some_method_1, but the per-server results are
    // collected and wrapped in a distributed scan over the channel.
    MPI.RequestList reqList = new MPI.RequestList();
    sendArguments(OPERATION_7, arg1, arg2, arg3, arg4, arg5, arg6, ref reqList);
    receiveResult<int>(ref reqList, OPERATION_7);
    IList<MPI.Request> reqList_complete = reqList.WaitAll();
    int[] result_values = takeResults(reqList_complete);
    return Scan<int>.create(channel, result_values, sum, 0);
}
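// NOTE: a minimal sketch of the takeResults helper used above, which is not shown in
// this listing. It assumes each completed receive carries one int payload, recovered
// via MPI.NET's ReceiveRequest.GetValue(); send requests in the list are skipped, and
// the completed list is assumed to preserve the order in which the receives were
// posted. The `sum` reducer passed to Scan<int>.create is likewise assumed to be a
// simple (a, b) => a + b.
private int[] takeResults(IList<MPI.Request> completed)
{
    List<int> values = new List<int>();
    foreach (MPI.Request req in completed)
    {
        MPI.ReceiveRequest recv = req as MPI.ReceiveRequest;
        if (recv != null)
            values.Add((int)recv.GetValue());   // unwrap the received int
    }
    return values.ToArray();
}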
public override void main()
{
    int remote_size = channel.RemoteSize;
    while (true)
    {
        // Learn which operation the clients are invoking, then receive one
        // argument tuple from each client before dispatching to its handler.
        MPI.RequestList reqList = new MPI.RequestList();
        int operation_tag = channel.Receive<int>(0, OPERATION_TAG);
        for (int client = 0; client < remote_size; client++)
        {
            MPI.ReceiveRequest req = channel.ImmediateReceive<Tuple<int, int, int, int, int, int>>(client, operation_tag);
            reqList.Add(req);
        }
        IList<MPI.Request> reqList_complete = reqList.WaitAll();
        switch (operation_tag)
        {
            case OPERATION_1: handleOperation1(reqList_complete); break;
            case OPERATION_2: handleOperation2(reqList_complete); break;
            case OPERATION_3: handleOperation3(reqList_complete); break;
            case OPERATION_4: handleOperation4(reqList_complete); break;
            case OPERATION_5: handleOperation5(reqList_complete); break;
            case OPERATION_6: handleOperation6(reqList_complete); break;
            case OPERATION_7: handleOperation7(reqList_complete); break;
        }
    }
}
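// NOTE: a minimal sketch of one of the handleOperationN dispatch targets above, which
// are not shown in this listing. It assumes the completed list preserves the posting
// order (one receive per client rank), that each receive yields the 6-tuple built by
// sendArguments, and that compute1 is a hypothetical per-client computation whose int
// results are returned through returnResult.
private void handleOperation1(IList<MPI.Request> completed)
{
    int remote_size = channel.RemoteSize;
    int[] result = new int[remote_size];
    for (int client = 0; client < remote_size; client++)
    {
        MPI.ReceiveRequest recv = (MPI.ReceiveRequest)completed[client];
        Tuple<int, int, int, int, int, int> args = (Tuple<int, int, int, int, int, int>)recv.GetValue();
        result[client] = compute1(args.Item1, args.Item2, args.Item3, args.Item4, args.Item5, args.Item6);
    }
    returnResult(result, OPERATION_1);
}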
public void invoke(object action_id)
{
    int partner_size = channel.RemoteSize;
    int value = ActionDef.action_ids[action_id];

    // Post the same send/receive handshake as the asynchronous overload,
    // but block here until every request has completed.
    MPI.RequestList request_list = new MPI.RequestList();
    for (int i = 0; i < partner_size; i++)
    {
        MPI.Request req = channel.ImmediateSend<object>(value, i, value);
        request_list.Add(req);
    }
    for (int i = 0; i < partner_size; i++)
    {
        MPI.ReceiveRequest req = channel.ImmediateReceive<object>(i, value);
        request_list.Add(req);
    }
    Console.WriteLine(channel.Rank + ": BEFORE WAIT ALL");
    request_list.WaitAll();
    Console.WriteLine(channel.Rank + ": AFTER WAIT ALL");
}
private void sendArguments(int operation_tag, int arg1, int arg2, int arg3, IScatter<int> arg4, IScatter<int> arg5, IScatter<int> arg6, ref MPI.RequestList reqList)
{
    int remote_size = channel.RemoteSize;
    for (int server = 0; server < remote_size; server++)
    {
        // Only rank 0 announces the operation tag; every rank then sends its
        // argument tuple, scattering one element of arg4..arg6 to each server.
        if (channel.Rank == 0)
        {
            channel.Send<int>(operation_tag, server, OPERATION_TAG);
        }
        int arg1_ = arg1;
        int arg2_ = arg2;
        int arg3_ = arg3;
        int arg4_ = arg4.Value[server];
        int arg5_ = arg5.Value[server];
        int arg6_ = arg6.Value[server];
        Tuple<int, int, int, int, int, int> send_value = new Tuple<int, int, int, int, int, int>(arg1_, arg2_, arg3_, arg4_, arg5_, arg6_);
        MPI.Request req = channel.ImmediateSend<Tuple<int, int, int, int, int, int>>(send_value, server, operation_tag);
        reqList.Add(req);
    }
}
public ActionFuture(MPI.RequestList request_list, ManualResetEvent sync)
{
    this.request_list = request_list;
    this.sync = sync;
}
public ActionFuture(MPI.RequestList request_list)
{
    this.request_list = request_list;
}
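// NOTE: a minimal sketch of the synchronization surface ActionFuture is assumed to
// expose (the IActionFuture interface is not shown in this listing): Test() probes the
// pending requests without blocking, Wait() blocks until they all complete.
public bool Test()
{
    // Assumption: RequestList.TestAll() returns the completed requests when all have
    // finished, or null while some are still pending, per MPI.NET's convention.
    return request_list.TestAll() != null;
}

public void Wait()
{
    request_list.WaitAll();
}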
public override void main()
{
    int count = 0;
    Trace.WriteLine(WorldComm.Rank + ": STARTING SCATTER SPLIT DATA SOURCE #1");
    Bin_function.NumberOfPartitions = this.UnitSize["target"];
    IIteratorInstance<IKVPair<IMK, IMV>> bins_instance = (IIteratorInstance<IKVPair<IMK, IMV>>)Bins.Instance;
    int[] rank_workers = this.UnitRanks["target"];
    Trace.WriteLine(WorldComm.Rank + ": STARTING SCATTER SPLIT DATA SOURCE #2");

    // 1. Read the bins one by one from the iterator and send each one to a mapper
    //    (the "target" units) over MPI.
    object bins_object;
    while (bins_instance.fetch_next(out bins_object))
    {
        Trace.WriteLine(WorldComm.Rank + ": LOOP BIN " + (bins_object == null));

        // Read one bin.
        IKVPairInstance<IMK, IMV> bin = (IKVPairInstance<IMK, IMV>)bins_object;
        Trace.WriteLine(bin.Key.GetType() + " +++++ " + Key.Instance.GetType());

        // Retrieve the bin's key.
        Key.Instance = bin.Key;

        // Determine the rank of the mapper that owns this key.
        Trace.WriteLine(WorldComm.Rank + ": BEFORE BIN FUNCTION " + bins_instance.GetHashCode());
        Bin_function.go();
        Trace.WriteLine(WorldComm.Rank + ": AFTER BIN FUNCTION");
        int i = (int)((IIntegerInstance)Rank.Instance).Value;
        int rank = rank_workers[i];

        // Send the bin to the mapper.
        Trace.WriteLine(WorldComm.Rank + ": BEGIN SEND BIN KEY/VALUE to " + rank + " count=" + (count++));
        comm.Send<object>(bin.Key, rank, TAG_SPLITTER_IMK);
        comm.Send<object>(bin.Value, rank, TAG_SPLITTER_IMV);
        Trace.WriteLine(WorldComm.Rank + ": END SEND BIN KEY/VALUE to " + rank + " count=" + (count++));
    }
    Trace.WriteLine(Rank + ": FINISH LOOP SEND BINS !!!");

    // Send a "finish" message to every mapper so it stops waiting for bins.
    MPI.RequestList requests = new MPI.RequestList();
    foreach (int i in rank_workers)
    {
        Trace.WriteLine(WorldComm.Rank + ": BEGIN SEND BIN FINISH OK to " + i);
        MPI.Request request = comm.ImmediateSend<object>(0, i, TAG_SPLITTER_IMK_FINISH);
        Trace.WriteLine(WorldComm.Rank + ": END SEND BIN FINISH OK to " + i);
        requests.Add(request);
    }
    requests.WaitAll();
}
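// NOTE: a minimal sketch of the mapper-side receive loop this splitter is assumed to
// be talking to (not shown in this listing). It probes the incoming message tag to
// distinguish a bin from the finish token; splitter_rank is a hypothetical field
// naming the splitter's rank in comm.
while (true)
{
    MPI.Status status = comm.Probe(splitter_rank, MPI.Communicator.anyTag);
    if (status.Tag == TAG_SPLITTER_IMK_FINISH)
    {
        comm.Receive<object>(splitter_rank, TAG_SPLITTER_IMK_FINISH);  // drain the token
        break;
    }
    object key = comm.Receive<object>(splitter_rank, TAG_SPLITTER_IMK);
    object value = comm.Receive<object>(splitter_rank, TAG_SPLITTER_IMV);
    // ... hand (key, value) to the map function ...
}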