public void TestQueue()
{
    // Arrange
    Problem testProblem = new Problem(3, 5, 4);
    // The WorkerRole reads a 20-byte message: two bucket capacities, the goal volume, a start delay and a work delay
    byte[] qMsgBytes = new byte[20];
    Buffer.BlockCopy(BitConverter.GetBytes(testProblem.FirstBucket.Capacity), 0, qMsgBytes, 0, 4);
    Buffer.BlockCopy(BitConverter.GetBytes(testProblem.SecondBucket.Capacity), 0, qMsgBytes, 4, 4);
    Buffer.BlockCopy(BitConverter.GetBytes(testProblem.GoalWaterVolume), 0, qMsgBytes, 8, 4);
    Buffer.BlockCopy(BitConverter.GetBytes(0), 0, qMsgBytes, 12, 4);   // StartDelay: none
    Buffer.BlockCopy(BitConverter.GetBytes(-1), 0, qMsgBytes, 16, 4);  // WorkDelay: no delay, no yield
    CloudQueueMessage qMessage = new CloudQueueMessage(qMsgBytes);

    // Act
    _queue.AddMessage(qMessage);
    Thread.Sleep(6000);

    // Assert
    // Assertion done by watching the WorkerRole in the Debugger
}
// Deprecated message handling for SignedUpdates, with the theory of having a single ZmqSocket subscribe to all
// messages for a problem and then separate/parse them by which strategy the message is an update for
// and pass that appropriately to the client(s)
//protected IEnumerable<SignedProblemUpdate> GetProblemUpdates(ZmqSocket subscriber, string signature)
//{
//    if (subscriber == null)
//        throw new ArgumentNullException("subscriber");
//    SignedProblemUpdate update;
//    do
//    {
//        update = null;
//        // This is the blocking version of Receiving from a ZMQ Socket - this is okay since we will wrap this call in Rx
//        ZmqMessage msg = subscriber.ReceiveMessage();
//        // To decouple ProblemUpdate from the Transport Medium, only binary data is passed using byte[] instead of a ZeroMQ.Frame
//        update = new SignedProblemUpdate(msg.Select(f => f.Buffer).ToArray());
//        if (!signature.Equals(update.Signature))
//            continue;
//        if (update.IsAction || update.IsInitial)
//            yield return update;
//    } while ((update != null) && !(update.IsCompletion || update.IsError));
//    if (update != null)
//    {
//        if (update.IsCompletion)
//        {
//            yield return update;
//        }
//        else if (update.IsError)
//        {
//            throw update.GetException<Exception>();
//        }
//    }
//    yield break;
//}

/// <summary>
/// Solve the Problem using a WorkerRole hosted in the Azure PaaS Cloud asynchronously by queuing the problem in an Azure Storage CloudQueue and
/// listening on ZmqSockets for update messages from the Worker as the work is being done.
/// <para>Based on a setting from the Client in the WorkOrder, optionally use Rx Observable sequences over the message stream to signal
/// updates to the Client(s)</para>
/// </summary>
/// <param name="problemToSolve"><see cref="WaterBucket.Domain.Problem"/> to be solved and calculate the results</param>
/// <param name="work">The <see cref="WaterBucketWeb.Models.WorkOrder"/> passed from the Client determining how to perform the work of solving the <see cref="WaterBucket.Domain.Problem"/></param>
/// <returns>Task that can be awaited on for work being completed by the Worker</returns>
/// <seealso cref="Microsoft.WindowsAzure.Storage.Queue.CloudQueue"/>
/// <seealso cref="ZeroMQ"/>
protected async Task SolveProblemWithWorkerAsync(Problem problemToSolve, WorkOrder work)//, int delayStart, int workDelay)
{
    // Calling this here will ensure that it is trying to find workers anytime it needs them
    // rather than prefetching PublisherAddresses, finding out they're not initialized and
    // never being able to use workers after that
    // - fortunately or unfortunately, it doesn't matter in SignalR since Hubs are reconstructed
    //   for every call to the Hub, so the result if called in the ctor wouldn't be cached across calls
    //InitPublisherAddresses();
    //if (!CanUseWorker)
    //{
    //    Clients.All.addMsg("Cannot use Worker for calculations, reason: " + NoWorkerReason);
    //    return;
    //}

    // Check whether the CloudQueue is for the staging or production environment, which use separate Queues in order to provide fully isolated environments
    bool isStaging = RoleEnvironmentExt.GetRoleConfigSetting("UseStaging", false);
    // Get the CloudQueue on which we want to put messages for the WorkerRoles to get their work
    string storageAccountConStr = !isStaging ?
        RoleEnvironment.GetConfigurationSettingValue("StorageAccount") :
        RoleEnvironment.GetConfigurationSettingValue("StagingStorageAccount");
    CloudStorageAccount storageAccount = CloudStorageAccount.Parse(storageAccountConStr);
    CloudQueueClient qClient = storageAccount.CreateCloudQueueClient();
    CloudQueue queue = qClient.GetQueueReference(CLOUD_Q_FOR_WORK);
    queue.CreateIfNotExists();

    try
    {
        ZmqContext ctx = ZmqContext.Create();
        //ZmqSocket socket = ctx.CreateSocket(SocketType.SUB);
        //socket.Connect(publishAddress);
        //List<ISolutionStrategy> strategies = new List<ISolutionStrategy>();
        //strategies.Add(new BigToSmallSingleBucketSolutionStrategy(problemToSolve));
        //strategies.Add(new SmallToBigSingleBucketSolutionStrategy(problemToSolve));
        //IObservable<SignedProblemUpdate> combinedUpdates = null;
        //foreach (var s in strategies)
        //{
        //    socket.Subscribe(Encoding.UTF8.GetBytes(s.Signature));
        //    if (combinedUpdates == null)
        //    {
        //        combinedUpdates = GetProblemUpdates(socket, s.Signature).ToObservable();
        //    }
        //    else
        //    {
        //        combinedUpdates = combinedUpdates.Union(GetProblemUpdates(socket, s.Signature).ToObservable());
        //    }
        //}
        ISolutionStrategy bigToSmall = new BigToSmallSingleBucketSolutionStrategy(problemToSolve);
        ISolutionStrategy smallToBig = new SmallToBigSingleBucketSolutionStrategy(problemToSolve);
        //socket.Subscribe(Encoding.UTF8.GetBytes(bigToSmall.Signature));
        //socket.Subscribe(Encoding.UTF8.GetBytes(smallToBig.Signature));
        //var combinedUpdates = (from u in GetProblemUpdates(socket, bigToSmall.Signature)
        //                       select u)
        //                      .Union(
        //                       from u in GetProblemUpdates(socket, smallToBig.Signature)
        //                       select u);
        //bool done = false;
        //using (combinedUpdates.ToObservable().Subscribe(
        //    update =>
        //    {
        //        if (update.IsAction || update.IsInitial)
        //        {
        //            ProblemActionStep(update.IntoType<BucketActionStep>().UpdateState);
        //        }
        //        else if (update.IsCompletion)
        //        {
        //            ISolutionStrategy signedStrategy = strategies.FirstOrDefault(s => update.Signature.Equals(s.Signature));
        //            if (signedStrategy != null)
        //            {
        //                var solnResult = update.IntoType<SolutionResult>().UpdateState;
        //                ProblemSolutionCompleted(signedStrategy.StrategyName);
        //                // Don't need this with the bool done
        //                signedStrategy.RemoteResult(solnResult);
        //                SignalSolution(signedStrategy.StrategyName, solnResult);
        //            }
        //            //if (bigToSmall.Signature.Equals(update.Signature))
        //            //{
        //            //    SignalSolution(bigToSmall.StrategyName, update.IntoType<SolutionResult>().UpdateState);
        //            //}
        //            //else if (smallToBig.Signature.Equals(update.Signature))
        //            //{
        //            //    SignalSolution(smallToBig.StrategyName, update.IntoType<SolutionResult>().UpdateState);
        //            //}
        //        }
        //        else if (update.IsError)
        //        {
        //            ProblemActionError(update.GetException<Exception>());
        //        }
        //    },
        //    ProblemActionError,
        //    () => { done = true; }))
        //{
        //    byte[] qMsgBytes = new byte[12];
        //    Buffer.BlockCopy(BitConverter.GetBytes(problemToSolve.FirstBucket.Capacity), 0, qMsgBytes, 0, 4);
        //    Buffer.BlockCopy(BitConverter.GetBytes(problemToSolve.SecondBucket.Capacity), 0, qMsgBytes, 4, 4);
        //    Buffer.BlockCopy(BitConverter.GetBytes(problemToSolve.GoalWaterVolume), 0, qMsgBytes, 8, 4);
        //    CloudQueueMessage qMsg = new CloudQueueMessage(qMsgBytes);
        //    queue.AddMessage(qMsg);
        //    while (!done)
        //    {
        //        Thread.Sleep(500);
        //    }
        //}

        Task b2sWork, s2bWork;
        // Put the work.YieldWeb value in a local value type which is easier to pass to child Tasks
        bool yieldOnAction = work.YieldWeb;
        // The Client does NOT want to use a Rx Observable to wrap the message stream from the ZmqSocket
        if (!work.ObserverOnZmq)
        {
            // Start tasks to listen for Worker updates on their own ZmqSockets, one per SolutionStrategy that will be employed
            // to solve the problem, and signal Client(s) as the updates come in
            b2sWork = Task.Run(() =>
            {
                try
                {
                    foreach (var step in GetWorkerUpdates(ctx, bigToSmall, result => SignalSolution(bigToSmall.StrategyName, result)))
                    {
                        ProblemActionStep(step, yieldOnAction);
                    }
                    ProblemSolutionCompleted("Big to Small");
                }
                catch (Exception ex)
                {
                    ProblemActionError(ex);
                }
            });
            s2bWork = Task.Run(() =>
            {
                try
                {
                    foreach (var step in GetWorkerUpdates(ctx, smallToBig, result => SignalSolution(smallToBig.StrategyName, result)))
                    {
                        ProblemActionStep(step, yieldOnAction);
                    }
                    ProblemSolutionCompleted("Small to Big");
                }
                catch (Exception ex)
                {
                    ProblemActionError(ex);
                }
            });
        }
        else // The Client wants to use a Rx Observable to wrap the message stream from the ZmqSocket
        {
            // First create a lazily evaluated Linq Sequence around getting ZmqSocket subscription messages for a specific SolutionStrategy
            var b2sUpdates = from u in GetWorkerUpdates(ctx, bigToSmall, result => SignalSolution(bigToSmall.StrategyName, result))
                             select u;
            // Start a task in which a Rx Observable will wrap the message stream and signal the Client(s) when messages are received
            b2sWork = Task.Run(() => b2sUpdates.ToObservable()
                    .Subscribe(step => ProblemActionStep(step, yieldOnAction),
                        ProblemActionError,
                        () => ProblemSolutionCompleted("Big to Small")))
                .ContinueWith(t =>
                {
                    if (t.IsCanceled)
                    {
                        // TODO: Log this
                    }
                    else if (t.IsFaulted)
                    {
                        // TODO: Log the Exception
                    }
                    else if (t.IsCompleted)
                    {
                        t.Result.Dispose();
                    }
                });
            // Repeat the above steps for the other SolutionStrategy
            var s2bUpdates = from u in GetWorkerUpdates(ctx, smallToBig, result => SignalSolution(smallToBig.StrategyName, result))
                             select u;
            s2bWork = Task.Run(() => s2bUpdates.ToObservable()
                    .Subscribe(step => ProblemActionStep(step, yieldOnAction),
                        ProblemActionError,
                        () => ProblemSolutionCompleted("Small to Big")))
                .ContinueWith(t =>
                {
                    if (t.IsFaulted)
                    {
                        // TODO: Log the Exception
                    }
                    else if (t.IsCanceled)
                    {
                        // TODO: Log this
                    }
                    else if (t.IsCompleted)
                    {
                        t.Result.Dispose();
                    }
                });
        }

        // To ensure the Subscriptions occur before any messages are published by the Worker, we could call Thread.Yield here
        // and we would get control back once those Tasks block on attempting to receive a message
        //Thread.Yield();
        // BUT - we want to see the effects of delays on publishing and the Zmq PUB-SUB messaging, so we're not doing that now

        // Create the message to be put into the Azure Storage CloudQueue that is picked up by the Workers
        byte[] qMsgBytes = new byte[20];
        Buffer.BlockCopy(BitConverter.GetBytes(problemToSolve.FirstBucket.Capacity), 0, qMsgBytes, 0, 4);
        Buffer.BlockCopy(BitConverter.GetBytes(problemToSolve.SecondBucket.Capacity), 0, qMsgBytes, 4, 4);
        Buffer.BlockCopy(BitConverter.GetBytes(problemToSolve.GoalWaterVolume), 0, qMsgBytes, 8, 4);
        Buffer.BlockCopy(BitConverter.GetBytes(work.StartDelay), 0, qMsgBytes, 12, 4);
        // Signal to the worker the WorkDelay, or whether it should Yield on each Action (workDelay == 0) or not Yield at all (workDelay < 0)
        int workDelay = work.WorkDelay > 0 ? work.WorkDelay : work.YieldWorker ? 0 : -1;
        Buffer.BlockCopy(BitConverter.GetBytes(workDelay), 0, qMsgBytes, 16, 4);
        CloudQueueMessage qMsg = new CloudQueueMessage(qMsgBytes);
        queue.AddMessage(qMsg);

        // Now wait for the Subscription tasks to complete
        await Task.WhenAll(b2sWork, s2bWork);
    }
    catch (Exception ex)
    {
        // TODO: Log any uncaught exception
        throw new Exception("Encountered Exception during WorkerAsync", ex);
    }
}
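For reference, the CloudQueue message built above is a fixed 20-byte layout of five little-endian Int32 values that the WorkerRole decodes with matching offsets. A minimal sketch of a pack/unpack helper for that layout - the QueueMessageLayout class and its method names are illustrative only, not part of the actual codebase:

// Hypothetical helper illustrating the 20-byte queue message layout used between the WebRole and WorkerRole.
// Offsets: 0 = FirstBucket.Capacity, 4 = SecondBucket.Capacity, 8 = GoalWaterVolume, 12 = StartDelay,
// 16 = WorkDelay (> 0 sleep per action, == 0 yield per action, < 0 neither).
internal static class QueueMessageLayout
{
    public static byte[] Pack(int firstCapacity, int secondCapacity, int goalVolume, int startDelay, int workDelay)
    {
        byte[] bytes = new byte[20];
        Buffer.BlockCopy(BitConverter.GetBytes(firstCapacity), 0, bytes, 0, 4);
        Buffer.BlockCopy(BitConverter.GetBytes(secondCapacity), 0, bytes, 4, 4);
        Buffer.BlockCopy(BitConverter.GetBytes(goalVolume), 0, bytes, 8, 4);
        Buffer.BlockCopy(BitConverter.GetBytes(startDelay), 0, bytes, 12, 4);
        Buffer.BlockCopy(BitConverter.GetBytes(workDelay), 0, bytes, 16, 4);
        return bytes;
    }

    public static void Unpack(byte[] bytes, out int firstCapacity, out int secondCapacity, out int goalVolume, out int startDelay, out int workDelay)
    {
        firstCapacity = BitConverter.ToInt32(bytes, 0);
        secondCapacity = BitConverter.ToInt32(bytes, 4);
        goalVolume = BitConverter.ToInt32(bytes, 8);
        startDelay = BitConverter.ToInt32(bytes, 12);
        workDelay = BitConverter.ToInt32(bytes, 16);
    }
}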
/// <summary>
/// Solve the Problem locally in the WebRole/Hub asynchronously using Rx Observable sequences to capture and signal updates to the Client(s)
/// </summary>
/// <param name="problemToSolve"><see cref="WaterBucket.Domain.Problem"/> to be solved and calculate the results</param>
/// <param name="work">The <see cref="WaterBucketWeb.Models.WorkOrder"/> passed from the Client determining how to perform the work of solving the <see cref="WaterBucket.Domain.Problem"/></param>
/// <returns>Task resulting in <see cref="WaterBucket.Domain.SolutionResult"/>s for the problem for each <see cref="WaterBucket.Domain.ISolutionStrategy"/> employed</returns>
protected async Task<SolutionResult[]> SolveAProblemAsync(Problem problemToSolve, WorkOrder work)//, int delayStart, int workDelay)
{
    if (work.StartDelay > 0)
        Thread.Sleep(work.StartDelay);

    ISolutionStrategy smallToBig, bigToSmall;
    smallToBig = new SmallToBigSingleBucketSolutionStrategy(problemToSolve);
    bigToSmall = new BigToSmallSingleBucketSolutionStrategy(problemToSolve);

    var startSmall = from step in problemToSolve.Solve(smallToBig)
                     select step;
    //startSmall.ToObservable().Subscribe(ProblemActionStep, ProblemActionError, ProblemSolutionCompleted);
    // Example of doing all of the code on a single line
    //problemToSolve.Solve(new SmallToBigSingleBucketSolutionStrategy(problemToSolve)).ToObservable().Subscribe(ProblemActionStep, ProblemActionError, () => ProblemSolutionCompleted("Small to Big"));

    // Capture as a primitive value type rather than rely on a reference type for use in Tasks - was burned on this with the PublisherAddresses list
    bool yieldOnAction = work.YieldWeb;
    // Set up a Task to run the Observable in but don't wait on it yet
    var smallToBigTask = Task.Run(() => (work.WorkDelay > 0 ?
                startSmall.ToObservable().Do(_ => Thread.Sleep(work.WorkDelay)) :
                startSmall.ToObservable())
            .Subscribe(step => ProblemActionStep(step, yieldOnAction),
                ProblemActionError,
                () => ProblemSolutionCompleted("Small to Big")))
        .ContinueWith(t =>
        {
            if (t.IsCanceled)
            {
            }
            else if (t.IsFaulted)
            {
            }
            else if (t.IsCompleted)
            {
                t.Result.Dispose();
                SignalSolution(smallToBig.StrategyName, smallToBig.Result);
                return smallToBig.Result;
            }
            return null;
        });

    var startBig = from step in problemToSolve.Solve(bigToSmall)
                   select step;
    //var startBig = (from step in problemToSolve.Solve(bigToSmall)
    //                select step).ToObservable();
    //if (workDelay > 0)
    //{
    //    var timer = Observable.Interval(TimeSpan.FromMilliseconds(workDelay));
    //    startBig = startBig.Zip(timer, (bs, _) => bs);
    //}
    // Set up a Task to run the Observable in but don't wait on it yet
    var bigToSmallTask = Task.Run(() => (work.WorkDelay > 0 ?
                startBig.ToObservable().Do(_ => Thread.Sleep(work.WorkDelay)) :
                startBig.ToObservable())
            .Subscribe(step => ProblemActionStep(step, yieldOnAction),
                ProblemActionError,
                () => ProblemSolutionCompleted("Big to Small")))
        .ContinueWith(t =>
        {
            if (t.IsCanceled)
            {
            }
            else if (t.IsFaulted)
            {
            }
            else if (t.IsCompleted)
            {
                t.Result.Dispose();
                SignalSolution(bigToSmall.StrategyName, bigToSmall.Result);
                return bigToSmall.Result;
            }
            return null;
        });

    // Wait for the Observable execution tasks to complete before finishing
    return await Task.WhenAll(smallToBigTask, bigToSmallTask);
}
/// <summary>
/// Get a message from the Client to Solve a given problem using the given work order
/// </summary>
/// <param name="problem"><see cref="WaterBucket.Domain.Problem"/> defining the Bucket and Goal Parameters to use in the calculation</param>
/// <param name="work">WorkOrder parameters used during execution of the solution</param>
public void Solve(ProblemVM problem, WorkOrder work)//, bool useWorker = false, int delayStart = -1, int workDelay = -1)
{
    if (BroadcastAll)
    {
        Clients.All.problemSubmitted(ObservableOnJavaScript);
        Clients.Others.submission(problem, work.UseWorker);
    }
    else
    {
        Clients.Caller.problemSubmitted(ObservableOnJavaScript);
    }

    // Test Bucket Ranges here instead of at the browser so that updates made on the back end for thresholds are checked without requiring browser refresh
    bool end = false;
    int bigBucket = Math.Max(problem.FirstBucketCapacity, problem.SecondBucketCapacity);
    if (bigBucket > BucketMax)
    {
        if (BroadcastAll)
        {
            Clients.All.outOfRange(bigBucket, true);
        }
        else
        {
            Clients.Caller.outOfRange(bigBucket, true);
        }
        end = true;
    }
    int smallBucket = Math.Min(problem.FirstBucketCapacity, problem.SecondBucketCapacity);
    if (smallBucket < BucketMin)
    {
        if (BroadcastAll)
        {
            Clients.All.outOfRange(smallBucket, false);
        }
        else
        {
            Clients.Caller.outOfRange(smallBucket, false);
        }
        end = true;
    }
    if (end)
        return;

    // Check to see if the Problem exists in the Cache
    Problem problemo = HttpContext.Current.Cache[problem.CacheKey] as Problem;
    // If not, create a new Problem based on the parameters to solve and add it to the Cache
    if (problemo == null)
    {
        problemo = new Problem(problem.FirstBucketCapacity, problem.SecondBucketCapacity, problem.GoalWaterVolume);
        HttpContext.Current.Cache.Insert(problem.CacheKey, problemo);
    }

    // Notify Client(s) that the Problem is being started
    // ActionBindingThreshold tells the client browser whether to use knockout.js observableArray binding for displaying action step updates or
    // plain old DOM manipulation - ko.observableArray creates a big performance hit
    bool bindActions = bigBucket <= ActionBindingThreshold;
    if (BroadcastAll)
    {
        Clients.All.startedProblem(problem, bindActions && !work.UseWorker);
    }
    else
    {
        Clients.Caller.startedProblem(problem, bindActions && !work.UseWorker);
    }

    // Check to see if it is a solvable Problem
    if (!problemo.IsSolvable)
    {
        // If not a solvable problem then notify the clients and do not try to solve it
        if (BroadcastAll)
        {
            Clients.All.notSolvable(problem);
        }
        else
        {
            Clients.Caller.notSolvable(problem);
        }
    }
    else
    {
        // Try-Catch didn't work here to solve the problem I was having
        //try
        //{
        var problemTasks = work.UseWorker ?
            SolveProblemWithWorkerAsync(problemo, work) :
            SolveAProblemAsync(problemo, work);
        //    problemTasks.ContinueWith(t =>
        //    {
        //        if (t.IsFaulted)
        //            Clients.All.errorInSolve(t.Exception);
        //    });
        //}
        //catch (Exception ex)
        //{
        //    Clients.All.errorInSolve(ex);
        //}
    }
}
public override void Run()
{
    // This is a sample worker implementation. Replace with your logic.
    Trace.WriteLine("WaterBucketWorker entry point called", "Information");

    // Check whether the CloudQueue is for the staging or production environment, which use separate Queues in order to provide fully isolated environments
    // IF THIS IS CHANGED - the WorkerRole MUST be RESTARTED in order for it to take effect
    bool isStaging = RoleEnvironmentExt.GetRoleConfigSetting("UseStaging", false);
    // Get the CloudQueue on which we want to get messages from the WebRoles for the work to be done
    // NOTE: don't use RoleEnvironmentExt.GetRoleConfigSetting() here as the WorkerRole should crash if the setting isn't configured properly
    string storageAccountConStr = !isStaging ?
        RoleEnvironment.GetConfigurationSettingValue("StorageAccount") :
        RoleEnvironment.GetConfigurationSettingValue("StagingStorageAccount");
    CloudStorageAccount storageAccount = CloudStorageAccount.Parse(storageAccountConStr);
    CloudQueueClient qClient = storageAccount.CreateCloudQueueClient();
    CloudQueue queue = qClient.GetQueueReference(CLOUD_Q_FOR_WORK);
    queue.CreateIfNotExists();

    // Get the TCP address for publishing update messages for work being done using ZeroMQ
    RoleInstanceEndpoint zeromqPubEP = RoleEnvironment.CurrentRoleInstance.InstanceEndpoints.ContainsKey(ZMQ_PUBLISHER) ?
        RoleEnvironment.CurrentRoleInstance.InstanceEndpoints[ZMQ_PUBLISHER] : null;
    // Gracefully just not work, to prevent restarts and errors from a known configuration limitation
    // - allows testing that WebRoles properly detect the inability to connect by changing the EndPoint name
    while (zeromqPubEP == null)
    {
        Thread.Sleep(1000);
        zeromqPubEP = RoleEnvironment.CurrentRoleInstance.InstanceEndpoints.ContainsKey(ZMQ_PUBLISHER) ?
            RoleEnvironment.CurrentRoleInstance.InstanceEndpoints[ZMQ_PUBLISHER] : null;
    }
    //if (zeromqPubEP == null)
    //{
    //    throw new Exception("Could not get 'ZmqPublisher' Endpoint");
    //}
    string publishAddress = string.Format("tcp://{0}:{1}", zeromqPubEP.IPEndpoint.Address.ToString(), zeromqPubEP.IPEndpoint.Port);
    //this.GetRoleConfigSetting("PublisherAddress", "tcp://127.0.0.1:9898");
    //string baseIPAddress;
    //try
    //{
    //    baseIPAddress = RoleEnvironment.GetConfigurationSettingValue("BaseIPAddress");
    //}
    //catch (RoleEnvironmentException rex)
    //{
    //    //ErrorSignal.
    //    baseIPAddress = "127.0.0.1";
    //}
    string signalAddress = null;
    bool useSignal = false;
    try
    {
        // Not the right way to get addresses for ZmqSockets - need to use Azure EndPoints and configure them in the WebRole/WorkerRoles before publishing
        signalAddress = RoleEnvironment.GetConfigurationSettingValue("SignallerAddress");
        useSignal = !string.IsNullOrWhiteSpace(signalAddress);
    }
    catch (RoleEnvironmentException rex)
    {
        useSignal = false;
    }

    using (var ctx = ZmqContext.Create())
    {
        try
        {
            // Use XPUB-XSUB forwarding from the Internal Publish to the External TCP Publish, or use the clrzmq provided ForwarderDevice which uses regular PUB-SUB sockets?
            // XPUB-XSUB was designed by the ZeroMQ team for this use case
            bool useXForwarder = RoleEnvironmentExt.GetRoleConfigSetting("Forwarder.UseX", false);
            using (Device forwarder = !useXForwarder ?
                new ForwarderDevice(ctx, INTERNAL_PUB_ADDRESS, publishAddress, DeviceMode.Threaded) as Device :
                new XForwarderDevice(ctx, INTERNAL_PUB_ADDRESS, publishAddress, DeviceMode.Threaded))
            {
                forwarder.FrontendSetup.SubscribeAll();
                forwarder.Start();
                //using (ZmqSocket pairSubSocket = ctx.CreateSocket(SocketType.XSUB))
                //{
                //    pairSubSocket.Bind(INTERNAL_PUB_ADDRESS);
                //    pairSubSocket.SubscribeAll();
                //    using (ZmqSocket pairPubSocket = ctx.CreateSocket(SocketType.XPUB))
                //    {
                //        pairPubSocket.Bind(publishAddress);
                //        pairSubSocket.Forward(pairPubSocket);
                //using (ZmqSocket socket = ctx.CreateSocket(SocketType.PUB))
                //{
                //    socket.Connect("inproc://publishing");
                //    //socket.Bind(publishAddress);

                // Wait a beat for work to arrive
                Thread.Sleep(3000);
                while (true)
                {
                    CloudQueueMessage msg = null;
                    // Read a problem to solve off of the queue
                    msg = queue.GetMessage(TimeSpan.FromMinutes(3));
                    // If no problems are on the queue
                    if (msg == null)
                    {
                        // Using Signal Sockets to receive work is more efficient than using Thread.Sleep
                        if (useSignal)
                        {
                            #region Using Signaller Socket
                            // Instead of waiting in a Sleep cycle, let the application Signal you to wake up when there is a problem to work on
                            using (var signaller = ctx.CreateSocket(SocketType.PULL))
                            {
                                signaller.Bind(signalAddress);
                                while (true)
                                {
                                    var signal = signaller.ReceiveMessage();
                                    // Empty messages are false signals
                                    if (signal.IsEmpty)
                                        continue;
                                    // Having now been signalled to wake up, get a problem from the queue
                                    msg = queue.GetMessage(TimeSpan.FromMinutes(3));
                                    // If this is the first worker to get the problem, then attempt to solve it
                                    if (msg != null)
                                    {
                                        break;
                                    }
                                }
                            }
                            #endregion
                        }
                        else // If not using a Signal, then sleep and repeat the while loop to get a message
                        {
                            Thread.Sleep(500);
                            continue;
                        }
                    }

                    // Detect if the Work to be done is a poison pill and delete it without working on it
                    if (msg.DequeueCount > 5)
                    {
                        queue.DeleteMessage(msg);
                        continue;
                    }

                    try
                    {
                        //bool yieldOnAction = RoleEnvironmentExt.GetRoleConfigSetting("Action.On.Yield", false);
                        //int workTimeout = RoleEnvironmentExt.GetRoleConfigSetting("Work.Timeout", 0);

                        // Turn the Queue Message into a Problem and some WorkOrder settings
                        byte[] problemBytes = msg.AsBytes;
                        int firstCapacity = BitConverter.ToInt32(problemBytes, 0);
                        int secondCapacity = BitConverter.ToInt32(problemBytes, 4);
                        int waterGoal = BitConverter.ToInt32(problemBytes, 8);
                        int startDelay = BitConverter.ToInt32(problemBytes, 12);
                        int workDelay = BitConverter.ToInt32(problemBytes, 16);
                        Problem problemToSolve = new Problem(firstCapacity, secondCapacity, waterGoal);

                        // Get the SolutionStrategies to be employed in solving the problem
                        ISolutionStrategy smallToBig, bigToSmall;
                        smallToBig = new SmallToBigSingleBucketSolutionStrategy(problemToSolve);
                        bigToSmall = new BigToSmallSingleBucketSolutionStrategy(problemToSolve);

                        // Get a binary formatter for messages that will be local to the Task thread
                        BinaryFormatter startSmallFormatter = new BinaryFormatter();
                        //var scheduler = System.Threading.Tasks.TaskScheduler.FromCurrentSynchronizationContext();
                        //var startSmallUpdates = from step in problemToSolve.Solve(smallToBig).ToObservable()
                        //                        select GetUpdateMessage(smallToBig,
                        //                                                step.ActionTaken == BucketActions.Init ? ProblemUpdateType.Initial : ProblemUpdateType.Action,
                        //                                                step);
                        //bool startSmallCompleted = false;
                        //using (startSmallUpdates.ObserveOn(System.Reactive.Concurrency.CurrentThreadScheduler.Instance).Subscribe(
                        //    actMsg =>
                        //    {
                        //        socket.SendMessage(actMsg);
                        //    },
                        //    ex => SolutionExceptionOccurred(smallToBig, socket, ex, startSmallFormatter),
                        //    () => SolutionCompletion(smallToBig, socket, ref startSmallCompleted, startSmallFormatter)))
                        //{
                        //    while (!startSmallCompleted)
                        //    {
                        //        Thread.Sleep(300);
                        //    }
                        //    queue.DeleteMessage(msg);
                        //}
                        //var scheduler = System.Reactive.Concurrency.Scheduler.CurrentThread;
                        //var thread = System.Threading.Thread.CurrentThread;
                        //var s = new System.Reactive.Concurrency.EventLoopScheduler(ts => thread);

                        if (startDelay > 0)
                            Thread.Sleep(startDelay);

                        // Create a task for performing work using a SolutionStrategy and create a ZmqSocket to publish updates internally which
                        // will be forwarded by the ForwarderDevice to any subscribers listening over our TCP Publish Endpoint
                        var startSmallTask = Task.Run(() =>
                        {
                            using (ZmqSocket smallPubSocket = ctx.CreateSocket(SocketType.PUB))
                            {
                                smallPubSocket.Connect(INTERNAL_PUB_ADDRESS);
                                try
                                {
                                    // Iterate each action step for the Small to Big Solution Strategy
                                    foreach (var step in problemToSolve.Solve(smallToBig))
                                    {
                                        SolutionAction(smallToBig, smallPubSocket, step, startSmallFormatter);
                                        // If the work order asked for an artificial work delay
                                        if (workDelay > 0)
                                            Thread.Sleep(workDelay);
                                        // If the work order asked for artificial concurrency
                                        else if (workDelay == 0)
                                            Thread.Yield();
                                    }
                                    SolutionCompletion(smallToBig, smallPubSocket, startSmallFormatter);
                                }
                                catch (Exception ex)
                                {
                                    SolutionExceptionOccurred(smallToBig, smallPubSocket, ex, startSmallFormatter);
                                }
                                //using (problemToSolve.Solve(smallToBig).ToObservable().Subscribe(
                                //    step => SolutionAction(smallToBig, smallPubSocket, step, startSmallFormatter),
                                //    ex => SolutionExceptionOccurred(smallToBig, smallPubSocket, ex, startSmallFormatter),
                                //    () => SolutionCompletion(smallToBig, smallPubSocket, startSmallFormatter)))
                                //{
                                //    // All activity is done by the subscribe handlers, we use the using clause to efficiently clean up
                                //    // the IDisposable from the Subscribe method
                                //}
                            }
                        });

                        // Get a binary formatter for messages that will be local to the Task thread
                        BinaryFormatter startBigFormatter = new BinaryFormatter();
                        // Create a task for performing work using a SolutionStrategy and create a ZmqSocket to publish updates internally which
                        // will be forwarded by the ForwarderDevice to any subscribers listening over our TCP Publish Endpoint
                        var startBigTask = Task.Run(() =>
                        {
                            //bool bigIsComplete = false;
                            using (ZmqSocket bigPubSocket = ctx.CreateSocket(SocketType.PUB))
                            {
                                bigPubSocket.Connect(INTERNAL_PUB_ADDRESS);
                                try
                                {
                                    foreach (var step in problemToSolve.Solve(bigToSmall))
                                    {
                                        SolutionAction(bigToSmall, bigPubSocket, step, startBigFormatter);
                                        if (workDelay > 0)
                                            Thread.Sleep(workDelay);
                                        else if (workDelay == 0)
                                            Thread.Yield();
                                    }
                                    SolutionCompletion(bigToSmall, bigPubSocket, startBigFormatter);
                                }
                                catch (Exception ex)
                                {
                                    SolutionExceptionOccurred(bigToSmall, bigPubSocket, ex, startBigFormatter);
                                }
                                //using (problemToSolve.Solve(bigToSmall).ToObservable().Subscribe(
                                //    step => SolutionAction(bigToSmall, bigPubSocket, step, startBigFormatter),
                                //    ex => SolutionExceptionOccurred(bigToSmall, bigPubSocket, ex, startBigFormatter),
                                //    () => SolutionCompletion(bigToSmall, bigPubSocket, ref bigIsComplete, startBigFormatter)))
                                //{
                                //    // All activity is done by the subscribe handlers, we use the using clause to efficiently clean up
                                //    // the IDisposable from the Subscribe method
                                //    //while (!bigIsComplete)
                                //    //    yield;
                                //}
                            }
                        });

                        // Wait for the work of both SolutionStrategies to complete before deleting the Queue message
                        Task.WhenAll(startSmallTask, startBigTask)
                            .ContinueWith(t =>
                            {
                                if (t.IsFaulted)
                                {
                                    // TODO: Log the t.Exception
                                }
                                else if (t.IsCanceled)
                                {
                                    // TODO: Log the Cancellation of the Task
                                }
                                else if (t.IsCompleted)
                                {
                                    // We can remove the message from the Queue so it's not executed again
                                    queue.DeleteMessage(msg);
                                }
                            }).Wait();
                    }
                    catch (Exception ex)
                    {
                        // log the Exception
                    }
                    //}
                }
            }
        }
        catch (ZmqDeviceException zde)
        {
            throw new Exception("Encountered ZmqDeviceException [" + zde.ToString() + " - " + zde.Message + "]", zde);
        }
        catch (ZmqSocketException zse)
        {
            throw new Exception("Encountered ZmqSocketException [" + zse.ToString() + " - " + zse.Message + "]", zse);
        }
    }
}
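The Signaller handling above only shows the receiving (PULL) side; the code that would send the wake-up signal is not shown here. A minimal sketch of what the sending side might look like, assuming the same "SignallerAddress" setting and clrzmq's Send(byte[], int, SocketFlags) overload - the SignalWorker method itself is hypothetical, not part of the actual WebRole:

// Hedged sketch: wake a sleeping Worker over the Signaller socket instead of relying on its 500 ms polling loop.
// Assumes the Worker has Bind-ed a PULL socket at the configured "SignallerAddress", as in Run() above.
private static void SignalWorker(ZmqContext ctx, string signalAddress)
{
    using (ZmqSocket pusher = ctx.CreateSocket(SocketType.PUSH))
    {
        pusher.Connect(signalAddress);
        // Any non-empty frame will do - the Worker treats empty messages as false signals
        byte[] wakeUp = Encoding.UTF8.GetBytes("wake");
        pusher.Send(wakeUp, wakeUp.Length, SocketFlags.None);
    }
}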
public SmallToBigSingleBucketSolutionStrategy(Problem problem)
    : base("SmallToBig",
           problem.FirstBucket.Capacity < problem.SecondBucket.Capacity ? problem.FirstBucket : problem.SecondBucket,
           problem.FirstBucket.Capacity < problem.SecondBucket.Capacity ? problem.SecondBucket : problem.FirstBucket,
           problem.GoalWaterVolume,
           problem.WaterSource)
{
}
public BigToSmallSingleBucketSolutionStrategy(Problem problem)
    : base("BigToSmall",
           problem.FirstBucket.Capacity > problem.SecondBucket.Capacity ? problem.FirstBucket : problem.SecondBucket,
           problem.FirstBucket.Capacity > problem.SecondBucket.Capacity ? problem.SecondBucket : problem.FirstBucket,
           problem.GoalWaterVolume,
           problem.WaterSource)
{
}
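Both constructors take the same Problem and simply order its two buckets by capacity before calling the base strategy, so the only difference between them is which bucket is handed over as the primary bucket. An illustrative usage with the Problem(3, 5, 4) from the tests:

// Illustrative only: for Problem(3, 5, 4) the FirstBucket holds 3 units and the SecondBucket holds 5 units,
// so SmallToBig passes the 3-unit bucket to the base strategy as its primary bucket,
// while BigToSmall passes the 5-unit bucket first.
var problem = new Problem(3, 5, 4);
ISolutionStrategy smallToBig = new SmallToBigSingleBucketSolutionStrategy(problem);
ISolutionStrategy bigToSmall = new BigToSmallSingleBucketSolutionStrategy(problem);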
public void TestSocketReceive()
{
    // Arrange
    var ctx = ZmqContext.Create();
    var sub = ctx.CreateSocket(SocketType.SUB);
    Problem testProblem = new Problem(3, 5, 4);
    // The WorkerRole reads a 20-byte message: two bucket capacities, the goal volume, a start delay and a work delay
    byte[] qMsgBytes = new byte[20];
    Buffer.BlockCopy(BitConverter.GetBytes(testProblem.FirstBucket.Capacity), 0, qMsgBytes, 0, 4);
    Buffer.BlockCopy(BitConverter.GetBytes(testProblem.SecondBucket.Capacity), 0, qMsgBytes, 4, 4);
    Buffer.BlockCopy(BitConverter.GetBytes(testProblem.GoalWaterVolume), 0, qMsgBytes, 8, 4);
    Buffer.BlockCopy(BitConverter.GetBytes(0), 0, qMsgBytes, 12, 4);   // StartDelay: none
    Buffer.BlockCopy(BitConverter.GetBytes(-1), 0, qMsgBytes, 16, 4);  // WorkDelay: no delay, no yield
    CloudQueueMessage qMessage = new CloudQueueMessage(qMsgBytes);
    var smallToBig = new SmallToBigSingleBucketSolutionStrategy(testProblem);
    var bigToSmall = new BigToSmallSingleBucketSolutionStrategy(testProblem);
    sub.Connect(_publishAddress);
    sub.Subscribe(Encoding.UTF8.GetBytes(smallToBig.Signature));
    sub.Subscribe(Encoding.UTF8.GetBytes(bigToSmall.Signature));

    // Act
    var subTask = Task.Run(() =>
    {
        ProblemUpdate update = null;
        ZmqMessage zqm = sub.ReceiveMessage(TimeSpan.FromSeconds(2));
        if (zqm == null)
        {
            Assert.Fail("Did not receive a message from the subscription socket within 2 seconds of adding the message to the Queue");
        }
        int[] numActions = new int[2] { 0, 0 };
        bool[] completed = new bool[2] { false, false };
        while ((zqm != null) && !completed.All(c => c))
        {
            // Process the message that has already been received before blocking on the next one
            string msgSig = Encoding.UTF8.GetString(zqm.Unwrap().Buffer);
            update = new ProblemUpdate(zqm.Select(f => f.Buffer).ToArray());
            if (update.IsAction)
            {
                if (msgSig.Equals(smallToBig.Signature))
                {
                    numActions[SMALL_TO_BIG]++;
                }
                else if (msgSig.Equals(bigToSmall.Signature))
                {
                    numActions[BIG_TO_SMALL]++;
                }
            }
            else if (update.IsCompletion)
            {
                if (msgSig.Equals(smallToBig.Signature))
                {
                    completed[SMALL_TO_BIG] = true;
                }
                else if (msgSig.Equals(bigToSmall.Signature))
                {
                    completed[BIG_TO_SMALL] = true;
                }
            }
            // Stop once both strategies have signalled completion - otherwise the blocking Receive below would never return
            if (completed.All(c => c))
                break;
            zqm = sub.ReceiveMessage();
        }

        // Assert
        Assert.IsFalse(update.IsError, "Received an Exception from the Socket");
        Assert.IsTrue(completed.All(c => c), "Not all strategies completed");
        Assert.AreEqual(NUM_ACTIONS[SMALL_TO_BIG], numActions[SMALL_TO_BIG],
            "Small to Big strategy received wrong number of action messages (" + numActions[SMALL_TO_BIG] + ") from socket - expected " + NUM_ACTIONS[SMALL_TO_BIG]);
        Assert.AreEqual(NUM_ACTIONS[BIG_TO_SMALL], numActions[BIG_TO_SMALL],
            "Big to Small strategy received wrong number of action messages (" + numActions[BIG_TO_SMALL] + ") from socket - expected " + NUM_ACTIONS[BIG_TO_SMALL]);
    });
    //if ((subTask.Status == TaskStatus.WaitingToRun))// || (subTask.Status == TaskStatus.WaitingForActivation))
    //    subTask.Start();
    _queue.AddMessage(qMessage);
    subTask.ContinueWith(t =>
    {
        if (t.IsFaulted)
            Assert.Fail("subTask threw Exception: " + t.Exception.ToString() + " " + t.Exception.Message);
        sub.UnsubscribeAll();
        sub.Disconnect(_publishAddress);
        sub.Dispose();
        ctx.Dispose();
    }).Wait(); // Block the test until the subscription task and its cleanup have finished so the Asserts are observed
}