// Exchanges the per-rank candidate buffers with every other rank, merges
// everything received into this rank's own buffer slot, and returns the
// merged string. Also all-reduces the `active` flag (max across ranks) so
// every rank agrees on whether any rank still has work.
// NOTE(review): `request` is a field declared elsewhere in this class; the
// sends posted into it here are completed by the WaitAll() below.
public string sincronize() {
    string candidates_buffer = "";
    // Post a non-blocking send of the buffer destined for each peer rank,
    // then clear that outgoing slot so it can be refilled later.
    for (int i = 0; i < this.Communicator.Size; i++) {
        if (i != this.Communicator.Rank) {
            request.Add(this.Communicator.ImmediateSend <string> (Reduce_function.CandidatesBuffer [i], i, 0));
            Reduce_function.CandidatesBuffer [i] = "";
        }
    }
    // Blocking-receive one string from each peer and append it to this
    // rank's own slot. This avoids deadlock only because the matching
    // sends above are non-blocking — do not reorder these two loops.
    for (int i = 0; i < this.Communicator.Size; i++) {
        if (i != this.Communicator.Rank) {
            Reduce_function.CandidatesBuffer[this.Communicator.Rank] = Reduce_function.CandidatesBuffer[this.Communicator.Rank] + this.Communicator.Receive <string> (i, 0);
        }
    }
    // Complete all outstanding sends before reading the merged result.
    request.WaitAll();
    candidates_buffer = Reduce_function.CandidatesBuffer[this.Communicator.Rank];
    // A rank remains active if ANY rank is still active (max reduction).
    active = this.Communicator.Allreduce <int> (active, MPI.Operation <int> .Max);
    Reduce_function.clearCandidatesBuffer();
    return(candidates_buffer);
}
/// <summary>
/// Runs remote OPERATION_1: ships the six arguments to the servers, posts
/// the matching result receives, and blocks until every request completes.
/// </summary>
public void some_method_1(int arg1, int arg2, int arg3, IScatter <int> arg4, IScatter <int> arg5, IScatter <int> arg6) {
    MPI.RequestList pending = new MPI.RequestList();
    // Non-blocking sends of the arguments, accumulated into `pending`.
    sendArguments(OPERATION_1, arg1, arg2, arg3, arg4, arg5, arg6, ref pending);
    // Post the receives for the operation's results before waiting.
    receiveResult <int> (ref pending, OPERATION_1);
    pending.WaitAll();
}
/// <summary>
/// Delivers one result value back to every connected client through
/// non-blocking sends, then waits until all sends have completed.
/// </summary>
void returnResult(int[] result, int operation_tag) {
    MPI.RequestList pending = new MPI.RequestList();
    int clientCount = channel.RemoteSize;
    // result[client] is the value destined for that client's rank.
    for (int rank = 0; rank < clientCount; rank++) {
        pending.Add(channel.ImmediateSend <int> (result[rank], rank, operation_tag));
    }
    pending.WaitAll();
}
/// <summary>
/// Runs remote OPERATION_7 and wraps the per-client results in an
/// <c>IScan</c> over integer addition with an initial value of zero.
/// </summary>
public IScan <int> some_method_7(int arg1, int arg2, int arg3, IScatter <int> arg4, IScatter <int> arg5, IScatter <int> arg6) {
    MPI.RequestList pending = new MPI.RequestList();
    // Ship the arguments, then post the matching result receives.
    sendArguments(OPERATION_7, arg1, arg2, arg3, arg4, arg5, arg6, ref pending);
    receiveResult <int> (ref pending, OPERATION_7);
    // WaitAll returns the completed requests, whose payloads we extract.
    IList <MPI.Request> completed = pending.WaitAll();
    int[] values = takeResults(completed);
    return Scan <int> .create(channel, values, sum, 0);
}
/// <summary>
/// Server dispatch loop: waits for an operation announcement, gathers one
/// argument tuple from every client, and routes the completed requests to
/// the handler for that operation. Runs forever.
/// </summary>
public override void main() {
    int clientCount = channel.RemoteSize;
    while (true) {
        // Rank 0 announces which operation the clients will invoke next.
        int operation_tag = channel.Receive <int> (0, OPERATION_TAG);
        // Post one argument-tuple receive per client, then wait for all.
        MPI.RequestList pending = new MPI.RequestList();
        for (int client = 0; client < clientCount; client++) {
            pending.Add(channel.ImmediateReceive <Tuple <int, int, int, int, int, int> > (client, operation_tag));
        }
        IList <MPI.Request> completed = pending.WaitAll();
        // Dispatch the gathered arguments to the announced operation.
        switch (operation_tag) {
        case OPERATION_1:
            handleOperation1(completed);
            break;
        case OPERATION_2:
            handleOperation2(completed);
            break;
        case OPERATION_3:
            handleOperation3(completed);
            break;
        case OPERATION_4:
            handleOperation4(completed);
            break;
        case OPERATION_5:
            handleOperation5(completed);
            break;
        case OPERATION_6:
            handleOperation6(completed);
            break;
        case OPERATION_7:
            handleOperation7(completed);
            break;
        }
    }
}
/// <summary>
/// Broadcasts the numeric id of <paramref name="action_id"/> to every
/// partner, posts the matching acknowledgement receives, and blocks until
/// the whole rendezvous completes. The id doubles as the message tag.
/// </summary>
public void invoke(object action_id) {
    int partnerCount = channel.RemoteSize;
    int value = ActionDef.action_ids[action_id];
    MPI.RequestList pending = new MPI.RequestList();
    // Non-blocking send of the action id to each partner (tag == value).
    for (int partner = 0; partner < partnerCount; partner++) {
        pending.Add(channel.ImmediateSend <object>(value, partner, value));
    }
    // Matching non-blocking receive from each partner on the same tag.
    for (int partner = 0; partner < partnerCount; partner++) {
        pending.Add(channel.ImmediateReceive <object>(partner, value));
    }
    Console.WriteLine(channel.Rank + ": BEFORE WAIT ALL");
    pending.WaitAll();
    Console.WriteLine(channel.Rank + ": AFTER WAIT ALL");
}
// Splitter main loop: reads bins one at a time from the Bins iterator,
// asks Bin_function which target partition each bin belongs to, sends the
// bin's key/value pair to the corresponding mapper rank via blocking MPI
// sends, and finally broadcasts a "finish" message to every target worker.
public override void main() {
    int count = 0;
    Trace.WriteLine(WorldComm.Rank + ": STARTING SCATTER SPLIT DATA SOURCE #1");
    Bin_function.NumberOfPartitions = this.UnitSize["target"];
    IIteratorInstance <IKVPair <IMK, IMV> > bins_instance = (IIteratorInstance <IKVPair <IMK, IMV> >)Bins.Instance;
    // Ranks of the "target" (mapper) units, indexed by partition number.
    int[] rank_workers = this.UnitRanks["target"];
    Trace.WriteLine(WorldComm.Rank + ": STARTING SCATTER SPLIT DATA SOURCE #2");
    // 1. Read the bins, one by one, from the iterator and send each one to a mapper (target unit) using MPI.
    object bins_object;
    while (bins_instance.fetch_next(out bins_object)) {
        Trace.WriteLine(WorldComm.Rank + ": LOOP BIN " + (bins_object == null));
        // Read one bin.
        IKVPairInstance <IMK, IMV> bin = (IKVPairInstance <IMK, IMV>)bins_object;
        Trace.WriteLine(bin.Key.GetType() + " +++++ " + Key.Instance.GetType());
        // Publish the bin's key so Bin_function can see it.
        Key.Instance = bin.Key;
        // Determine the mapper's rank.
        Trace.WriteLine(WorldComm.Rank + ": BEFORE BIN FUNCTION " + bins_instance.GetHashCode());
        // NOTE(review): Bin_function.go() presumably writes the chosen
        // partition index into Rank.Instance — confirm against Bin_function.
        Bin_function.go();
        Trace.WriteLine(WorldComm.Rank + ": AFTER BIN FUNCTION");
        int i = (int)((IIntegerInstance)Rank.Instance).Value;
        int rank = rank_workers[i];
        // Start sending the bin to the mapper.
        Trace.WriteLine(WorldComm.Rank + ": BEGIN SEND BIN KEY/VALUE to " + rank + "cont=" + (count++));
        // Blocking sends: key first, then value — the mapper side must
        // receive them in the same tag order.
        comm.Send <object> (bin.Key, rank, TAG_SPLITTER_IMK);
        //Trace.WriteLine(WorldComm.Rank + ": SEND BIN KEY OK to " + rank);
        comm.Send <object> (bin.Value, rank, TAG_SPLITTER_IMV);
        //Trace.WriteLine(WorldComm.Rank + ": SEND BIN VALUE OK to " + rank);
        Trace.WriteLine(WorldComm.Rank + ": END SEND BIN KEY/VALUE to " + rank + "cont=" + (count++));
    }
    Trace.WriteLine(Rank + ": FINISH LOOP SEND BINS !!!");
    // send "finish" message
    // Non-blocking finish notifications to every target worker, completed
    // together by the WaitAll below.
    MPI.RequestList requests = new MPI.RequestList();
    foreach (int i in rank_workers) {
        Trace.WriteLine(WorldComm.Rank + ": BEGIN SEND BIN FINISH OK to " + i);
        MPI.Request request = comm.ImmediateSend <object> (0, i, TAG_SPLITTER_IMK_FINISH);
        Trace.WriteLine(WorldComm.Rank + ": END SEND BIN FINISH OK to " + i);
        requests.Add(request);
    }
    requests.WaitAll();
    // Trace.WriteLine(WorldComm.Rank + ": SEND BIN FINISH OK ALL ");
    //requestList.WaitAll();
}