        /// <summary>
        ///   Entry point for executing a time series model.
        ///   Must only be called on the master process.
        /// </summary>
        /// <param name="parameters"> the parameter set for the model run </param>
        public void Execute(MpiSysConfig parameters)
        {
            if (IsSlave)
            {
                throw new InvalidOperationException("This method can only be called on the root process");
            }

            Log.DebugFormat("Root: ***** commencing run {0} *****", Iterations);

            // Tell the slaves to expect some work
            MpiWorkPacket workPacket = new MpiWorkPacket(SlaveActions.DoWork, parameters);

            // Gather the catchment results (one result set per catchment)
            Log.Debug("Root: broadcasting work packets");
            WorldBroadcast(ref workPacket, 0);
            Log.Debug("Root: waiting for catchment results");
            catchmentScores = WorldGatherFlattened(new MpiObjectiveScores[0], NumCatchmentResultsPerWorker, 0);
            Debug.Assert(catchmentScores.Length == GlobalDefinition.Count);
            Log.DebugFormat("Root: {0} catchment results are in", catchmentScores.Length);

            // debug info about the ordering of catchments that can be expected in the final results
            if (Log.IsInfoEnabled && IsFirstRun)
            {
                IsFirstRun = false;
                StringBuilder sb = new StringBuilder();
                sb.Append("Catchment orders: ");
                foreach (var catchmentScore in catchmentScores)
                {
                    sb.AppendFormat("{0}, ", catchmentScore.CatchmentId);
                }
                Log.Info(sb);
            }

            Iterations++;
        }
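
        // A minimal, illustrative sketch of how calling code might drive the Execute entry point
        // from the master process; `RunOptimisation`, `candidateParameterSets` and `ProcessScores`
        // are assumed names for illustration only and are not part of the code above.
        public void RunOptimisation(IEnumerable<MpiSysConfig> candidateParameterSets)
        {
            foreach (MpiSysConfig candidate in candidateParameterSets)
            {
                // Broadcast the candidate parameter set and block until all
                // catchment scores have been gathered back from the workers.
                Execute(candidate);
                ProcessScores(catchmentScores); // hypothetical hook for the optimiser
            }

            // The slaves loop forever in RunSlave, so the master must explicitly
            // tell them when the optimisation is finished.
            TerminateSlaves();
        }
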
        /// <summary>
        ///   This method must be called by the root process, because the slaves sit in an infinite loop in RunSlave.
        ///   Only the root process, which hosts the optimiser, knows when the work is complete.
        /// </summary>
        public void TerminateSlaves()
        {
            // this is required because the Slaves sit in an infinite loop waiting for a new parameter set.
            Log.Info("Root: Shutting down MPI processes");
            MpiWorkPacket workPacket = new MpiWorkPacket(SlaveActions.Terminate);

            evaluator.WorldBroadcast(ref workPacket, 0);
            //Communicator.world.Broadcast(ref workPacket, 0);
        }
        internal override void WorldBroadcast(ref MpiWorkPacket workPacket, int root)
        {
            // Communicator.world.Broadcast(ref workPacket, root);
            if (this.rank == 0)
            {
                CatchmentResults = new List<MpiObjectiveScores>();
                if (workPacket.Command == SlaveActions.DoWork)
                {
                    // I don't think we can just call DoWork, as happens in the MPI layer (TODO: confirm the intent in the MPI implementation with Daniel).
                    var partialCatchmentResultsByCatchmentIds = new Dictionary<string, SerializableDictionary<string, MpiTimeSeries>>[instances.Count - 1];
                    var parameters = workPacket.Parameters;
                    for (int i = 1; i < instances.Count; i++)
                    {
                        // execute our list of models, accumulating the results into the appropriate partial result buffer.
#if CELL_WEIGHTED_SUMS
                        partialCatchmentResultsByCatchmentIds[i - 1] = EvaluateModels(parameters);
#else
                        Dictionary<string, List<SerializableDictionary<string, MpiTimeSeries>>> partialCatchmentResultsByCatchmentId = EvaluateModels(parameters);
#endif
                    }

                    for (int i = 1; i < instances.Count; i++)
                    {
                        // For each catchment, accumulate the partial results for each catchment back to the catchment-coordinator.
                        MpiObjectiveScores[] finalCatchmentResults = AccumulateCatchmentResultsInCatchmentCoordinator(
                            partialCatchmentResultsByCatchmentIds[i - 1], parameters);
                        WorldGatherFlattened(finalCatchmentResults, 0);
                    }

                    // OK, all catchment results have been accumulated. Time to send the catchment results back to world root.
                    //Log.DebugFormat("Rank {0}: submitting {1} final catchment results to master", WorldRank, finalCatchmentResults.Length);
                }
            }
            else
            {
                throw new NotSupportedException("SerialGriddedCatchmentObjectiveEvaluator only supports world broadcast from the master");
            }
        }
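
        // Hedged sketch only: the serial WorldGatherFlattened counterpart is not shown in this
        // excerpt. One plausible serial implementation simply appends each instance's results to
        // the CatchmentResults buffer that the serial WorldBroadcast above resets; the real
        // SerialGriddedCatchmentObjectiveEvaluator may implement this differently.
        internal void WorldGatherFlattened(MpiObjectiveScores[] values, int root)
        {
            // No communicator in the serial case: "gathering" is just accumulating
            // the per-instance catchment scores on the single in-process root.
            CatchmentResults.AddRange(values);
        }
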
        /// <summary>
        ///   Entry point for running a slave process.
        /// </summary>
        public override void RunSlave()
        {
            Log.DebugFormat("Rank {0}: RunSlave", WorldRank);

            if (IsMaster)
            {
                throw new InvalidOperationException("This method can only be called on the slave processes");
            }

            MpiWorkPacket workPacket = new MpiWorkPacket(SlaveActions.Nothing);

            while (workPacket.Command != SlaveActions.Terminate)
            {
                // Check for instructions
                Log.DebugFormat("Rank {0}: waiting for work", WorldRank);
                WorldBroadcast(ref workPacket, 0);
                Log.DebugFormat("Rank {0}: {1}", WorldRank, SlaveActions.ActionNames[workPacket.Command]);

                if (workPacket.Command == SlaveActions.DoWork)
                {
                    DoWork(workPacket.Parameters);
                }
            }
        }
        internal abstract void WorldBroadcast(ref MpiWorkPacket workPacket, int root);
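
        // Hedged sketch of how a program entry point might wire the two roles together with
        // MPI.NET. The evaluator construction and the driver call are illustrative assumptions;
        // only Communicator.world and the master/slave split appear in the code above.
        static void Main(string[] args)
        {
            using (new MPI.Environment(ref args))
            {
                var evaluator = BuildEvaluator(); // hypothetical factory, not in the source
                if (Communicator.world.Rank == 0)
                {
                    // Master: drive the model runs (e.g. repeated Execute calls, as sketched
                    // earlier) and broadcast Terminate once the optimiser has finished.
                    DriveOptimisation(evaluator); // hypothetical driver, not in the source
                }
                else
                {
                    // Slaves: block in RunSlave until a Terminate packet is broadcast.
                    evaluator.RunSlave();
                }
            }
        }
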
        internal override void WorldBroadcast(ref MpiWorkPacket workPacket, int root)
        {
            Communicator.world.Broadcast(ref workPacket, root);
        }
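
        // Hedged sketch (assumption, not shown in this excerpt): the MPI-backed gather counterpart
        // would presumably wrap MPI.NET's GatherFlattened on the world communicator, mirroring the
        // broadcast wrapper above. The real overloads may differ, e.g. taking a per-worker result
        // count as in the Execute method.
        internal MpiObjectiveScores[] WorldGatherFlattened(MpiObjectiveScores[] values, int root)
        {
            // Each rank contributes its catchment scores; the flattened array is
            // only meaningful on the root rank.
            return Communicator.world.GatherFlattened(values, root);
        }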