/// <summary>
/// Prints an RTaskResult to console output.
/// </summary>
public static void printRTaskResult(RTask task, RTaskResult result, String error)
{
    Console.WriteLine("\nTask: " + task);

    if (error != null)
    {
        Console.WriteLine("Status[fail]: cause=" + error);
    }
    else
    {
        switch (result.getType())
        {
            case RTaskType.DISCRETE:
                if (result.isSuccess())
                {
                    Console.WriteLine("Status[ok]: [ code : " +
                        result.getTimeOnCode() + " , server : " +
                        result.getTimeOnServer() + " , call : " +
                        result.getTimeOnCall() + " ]");
                }
                else
                {
                    Console.WriteLine("Status[fail]: cause=" + result.getFailure());
                }
                break;

            case RTaskType.POOLED:
                if (result.isSuccess())
                {
                    Console.WriteLine("Status[ok]: [ code : " +
                        result.getTimeOnCode() + " , server : " +
                        result.getTimeOnServer() + " , call : " +
                        result.getTimeOnCall() + " ]");
                }
                else
                {
                    Console.WriteLine("Status[fail]: cause=" + result.getFailure());
                }
                break;

            case RTaskType.BACKGROUND:
                if (result.isSuccess())
                {
                    Console.WriteLine("Status[ok]: [ server : " +
                        result.getTimeOnServer() + " , call : " +
                        result.getTimeOnCall() + " ]");
                }
                else
                {
                    Console.WriteLine("Status[fail]: cause=" + result.getFailure());
                }
                break;
        }
    }
}
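For context, a minimal sketch of how this helper might be wired into an asynchronous task listener, assuming the RBroker framework's RTaskListener interface exposes onTaskCompleted(RTask, RTaskResult) and onTaskError(RTask, String) callbacks (the completed-task callback is referenced in the pooled broker code below; the error callback name and signature are an assumption here, as is having printRTaskResult in scope on the same class):

public class SampleTaskListener : RTaskListener
{
    // Sketch only: RTaskListener member names and signatures are assumed
    // to mirror the Java RBroker framework.

    public void onTaskCompleted(RTask rTask, RTaskResult rTaskResult)
    {
        // Completed tasks (successful, or failed with a result) arrive here.
        printRTaskResult(rTask, rTaskResult, null);
    }

    public void onTaskError(RTask rTask, String error)
    {
        // Tasks that never produced a result report an error message only.
        printRTaskResult(rTask, null, error);
    }
}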
/// <summary>
/// Returns a resource token for the task back to the token pool.
/// </summary>
/// <param name="task">RTask submitted for execution on the pool</param>
/// <param name="result">RTaskResult containing the results of the completed task</param>
/// <remarks></remarks>
public override void callback(RTask task, RTaskResult result)
{
    Object obj;
    m_taskResourceTokenMap.TryGetValue(task, out obj);
    RProject rProject = (RProject)obj;

    /*
     * Check for Grid Exception.
     */
    Boolean bGridException = false;
    Exception failure = result.getFailure();

    if (failure != null)
    {
        if (failure.GetType() == typeof(HTTPRestException))
        {
            HTTPRestException ex = (HTTPRestException)failure;
            if (ex.errorCode >= 910 || ex.errorCode == 403)
            {
                bGridException = true;
            }
        }
    }

    if (bGridException)
    {
        /*
         * On detection of an RGridException, drop the RProject from
         * the pool so further tasks are not directed to that RProject.
         * We achieve this by simply not adding the RProject back to the
         * resourceTokenPool on this callback.
         *
         * We then need to adjust the parallelTaskLimit so the RBroker
         * will report the new (smaller) pool size on
         * RBroker.maxConcurrency() calls.
         */
        if (m_taskListener != null)
        {
            /*
             * When an asynchronous listener is in use, failed task
             * executions due to slot or grid failures can be
             * automatically resubmitted for execution by the RBroker.
             *
             * When RTaskResult.repeatTask is enabled, the
             * RBrokerEngine.RBrokerListenerManager will skip
             * calling taskListener.onTaskCompleted(task, result).
             * This prevents a client application from seeing
             * (or having to handle) temporary slot or grid related
             * failures on RTasks.
             */
            RTaskResultImpl resultImpl = (RTaskResultImpl)result;
            resultImpl.repeatTask = true;

            /*
             * Now re-submit for execution using the priority
             * queue to expedite processing.
             */
            try
            {
                submit(task, true);
            }
            catch (Exception tex)
            {
                throw new Exception("PooledTaskBroker: callback, task re-submission ex=" + tex.ToString());
            }
        }

        int resizedPoolSize = (int)Interlocked.Decrement(ref m_parallelTaskLimit);

        if (m_brokerListener != null)
        {
            Exception rbex;

            if (resizedPoolSize == 0)
            {
                rbex = new Exception("DeployR grid failure detected, pool no longer operational, advise RBroker shutdown.");
            }
            else
            {
                rbex = new Exception("DeployR grid failure detected, pool size auto-adjusted, max concurrency now " + resizedPoolSize + ".");
            }

            m_brokerListener.onRuntimeError(rbex.Message);
        }
    }
    else
    {
        if (rProject != null)
        {
            Boolean added = m_resourceTokenPool.TryAdd(rProject);
            if (!added)
            {
                throw new Exception("PooledTaskBroker: callback, project could not be added back to pool?");
            }
        }
        else
        {
            throw new Exception("PooledTaskBroker: callback, task does not have matching project?");
        }
    }
}
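To complement the onRuntimeError notification raised above, here is a minimal sketch of how a client-side broker listener might react once a grid failure has shrunk the pool to zero. It assumes the .NET RBrokerListener interface mirrors the Java framework (onRuntimeError(String) plus an onRuntimeStats callback) and that RBroker exposes maxConcurrency() and shutdown(); apart from maxConcurrency(), which the comments above reference, those names are assumptions.

public class SampleBrokerListener : RBrokerListener
{
    // Sketch only: the interface members and RBroker.shutdown() are assumed
    // to mirror the Java RBroker framework.

    private readonly RBroker m_broker;

    public SampleBrokerListener(RBroker broker)
    {
        m_broker = broker;
    }

    public void onRuntimeError(String error)
    {
        Console.WriteLine("Broker runtime error: " + error);

        // A grid failure that empties the pool leaves the broker unable to
        // execute further tasks, so release its resources.
        if (m_broker.maxConcurrency() == 0)
        {
            m_broker.shutdown();
        }
    }

    public void onRuntimeStats(RBrokerRuntimeStats stats, int maxConcurrency)
    {
        // Stats reporting omitted in this sketch.
    }
}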