/// <summary>
/// Replace the original command with the given replacement commands and start them.
/// The surviving original commands are kept; only the new commands are executed here.
/// </summary>
/// <param name="cmds">replacement commands produced by a batch split retry</param>
/// <param name="orig">the original command being replaced</param>
public void ExecuteBatchRetry(AsyncMultiCommand[] cmds, AsyncMultiCommand orig)
{
	// Size the merged array for all existing commands minus the one being
	// replaced, plus the replacements.
	AsyncMultiCommand[] merged = new AsyncMultiCommand[commands.Length + cmds.Length - 1];
	int index = 0;

	// Copy every existing command except the one being retried.
	for (int i = 0; i < commands.Length; i++)
	{
		if (commands[i] != orig)
		{
			merged[index++] = commands[i];
		}
	}

	// Append the replacement commands.
	for (int i = 0; i < cmds.Length; i++)
	{
		merged[index++] = cmds[i];
	}
	commands = merged;

	// Batch executors always execute all commands at once.
	maxConcurrent = commands.Length;

	// Start only the new commands; surviving originals are already in flight.
	foreach (AsyncMultiCommand cmd in cmds)
	{
		cmd.Execute();
	}
}
/// <summary>
/// Retry the batch by splitting this node's keys among other nodes.
/// Returns false when the split produces the same single node, so the
/// normal retry path should be used instead; returns true when new
/// replacement commands have been dispatched and the original command
/// must stop.
/// </summary>
protected internal override bool RetryBatch()
{
	List<BatchNode> batchNodes = null;

	try
	{
		// Retry requires keys for this node to be split among other nodes.
		// This can cause an exponential number of commands.
		batchNodes = GenerateBatchNodes();

		if (batchNodes.Count == 1 && batchNodes[0].node == batch.node)
		{
			// Batch node is the same. Go through normal retry.
			// Normal retries reuse eventArgs, so PutBackArgsOnError()
			// should not be called here.
			return(false);
		}
	}
	catch (Exception)
	{
		// Close original command so eventArgs is returned even when
		// node generation fails. (Matches the guarded RetryBatch variant.)
		base.PutBackArgsOnError();
		throw;
	}

	// Close original command.
	base.PutBackArgsOnError();

	// Execute new commands.
	AsyncMultiCommand[] cmds = new AsyncMultiCommand[batchNodes.Count];
	int count = 0;

	foreach (BatchNode batchNode in batchNodes)
	{
		AsyncBatchCommand cmd = CreateCommand(batchNode);
		cmd.sequenceAP = sequenceAP;
		cmd.sequenceSC = sequenceSC;
		cmd.SetBatchRetry(this);
		cmds[count++] = cmd;
	}
	parent.ExecuteBatchRetry(cmds, this);

	// Return true so the original batch command is stopped.
	return(true);
}
/// <summary>
/// Create one batch-read-list command per owning node and dispatch them,
/// honoring the policy's concurrency limit.
/// </summary>
public AsyncBatchReadListExecutor
(
	AsyncCluster cluster,
	BatchPolicy policy,
	BatchListListener listener,
	List<BatchRead> records
)
{
	this.listener = listener;
	this.records = records;

	// Group the requested records by the node that owns them.
	List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records);
	AsyncMultiCommand[] tasks = new AsyncMultiCommand[batchNodes.Count];

	for (int i = 0; i < batchNodes.Count; i++)
	{
		BatchNode batchNode = batchNodes[i];

		// Batch-read lists require the batch index protocol on every target node.
		if (!batchNode.node.hasBatchIndex)
		{
			throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Requested command requires a server that supports new batch index protocol.");
		}
		tasks[i] = new AsyncBatchReadListCommand(this, cluster, batchNode, policy, records);
	}

	// Dispatch commands to nodes.
	Execute(tasks, policy.maxConcurrentThreads);
}
/// <summary>
/// Create exists-sequence commands, choosing the new batch index protocol
/// per node when supported and falling back to the old direct protocol
/// (one command per namespace) otherwise, then dispatch them.
/// </summary>
public AsyncBatchExistsSequenceExecutor
(
	AsyncCluster cluster,
	BatchPolicy policy,
	Key[] keys,
	ExistsSequenceListener listener
) : base(cluster, policy, keys)
{
	this.listener = listener;

	// Create commands.
	AsyncMultiCommand[] tasks = new AsyncMultiCommand[base.taskSize];
	int taskCount = 0;

	foreach (BatchNode batchNode in batchNodes)
	{
		if (batchNode.node.UseNewBatch(policy))
		{
			// Node supports the batch index protocol: one command per node.
			tasks[taskCount++] = new AsyncBatchExistsSequenceCommand(this, cluster, batchNode, policy, keys, listener);
		}
		else
		{
			// Old batch only allows one namespace per call.
			foreach (BatchNode.BatchNamespace batchNamespace in batchNode.batchNamespaces)
			{
				tasks[taskCount++] = new AsyncBatchExistsSequenceDirect(this, cluster, (AsyncNode)batchNode.node, batchNamespace, policy, keys, listener);
			}
		}
	}

	// Dispatch commands to nodes.
	Execute(tasks, policy.maxConcurrentThreads);
}
/// <summary>
/// On failure, split this node's keys among other nodes and re-dispatch,
/// but only for sequence/rack-aware replica policies while the parent
/// executor is still running; otherwise defer to the normal retry path.
/// </summary>
protected internal override void Retry(AerospikeException ae)
{
	// Node splitting applies only to SEQUENCE / PREFER_RACK replicas and
	// only while the parent executor has not finished.
	bool splitRetry = (policy.replica == Replica.SEQUENCE || policy.replica == Replica.PREFER_RACK) && !parent.IsDone();

	if (!splitRetry)
	{
		base.Retry(ae);
		return;
	}

	// Retry requires keys for this node to be split among other nodes.
	// This can cause an exponential number of commands.
	List<BatchNode> splitNodes = GenerateBatchNodes();

	if (splitNodes.Count == 1 && splitNodes[0].node == batch.node)
	{
		// Batch node is the same. Go through normal retry.
		base.Retry(ae);
		return;
	}

	// Close original command.
	base.PutBackArgsOnError();

	// Build one replacement command per split node and re-dispatch.
	AsyncMultiCommand[] replacements = new AsyncMultiCommand[splitNodes.Count];
	int index = 0;

	foreach (BatchNode splitNode in splitNodes)
	{
		AsyncMultiCommand cmd = CreateCommand(splitNode);
		cmd.SetBatchRetry(this);
		replacements[index++] = cmd;
	}
	parent.ExecuteBatchRetry(replacements, this);
}
/// <summary>
/// Create exists-array commands, choosing the new batch index protocol per
/// node when supported and falling back to the old direct protocol (one
/// command per namespace) otherwise, then dispatch them.
/// </summary>
public AsyncBatchExistsArrayExecutor
(
	AsyncCluster cluster,
	BatchPolicy policy,
	Key[] keys,
	ExistsArrayListener listener
) : base(cluster, policy, keys)
{
	this.existsArray = new bool[keys.Length];
	this.listener = listener;

	// Create commands.
	AsyncMultiCommand[] tasks = new AsyncMultiCommand[base.taskSize];
	int count = 0;

	foreach (BatchNode batchNode in batchNodes)
	{
		if (batchNode.node.UseNewBatch(policy))
		{
			// New batch
			tasks[count++] = new AsyncBatchExistsArrayCommand(this, cluster, batchNode, policy, keys, existsArray);
		}
		else
		{
			// Old batch only allows one namespace per call.
			foreach (BatchNode.BatchNamespace batchNamespace in batchNode.batchNamespaces)
			{
				tasks[count++] = new AsyncBatchExistsArrayDirect(this, cluster, (AsyncNode)batchNode.node, batchNamespace, policy, keys, existsArray);
			}
		}
	}

	// Dispatch commands to nodes, honoring the policy's concurrency limit.
	// Previously passed 0 (execute all at once), which ignored
	// policy.maxConcurrentThreads unlike the other batch executors.
	Execute(tasks, policy.maxConcurrentThreads);
}
/// <summary>
/// Register the commands and start the initial wave. A maxConcurrent of 0
/// (or any value at least the command count) means all commands are started
/// immediately; otherwise only the first maxConcurrent commands run now.
/// </summary>
public void Execute(AsyncMultiCommand[] commands, int maxConcurrent)
{
	this.commands = commands;

	// Zero (or an over-large limit) means unbounded: run everything at once.
	bool unbounded = maxConcurrent == 0 || maxConcurrent >= commands.Length;
	this.maxConcurrent = unbounded ? commands.Length : maxConcurrent;

	// Launch the first wave; remaining commands start as earlier ones finish.
	for (int i = 0; i < this.maxConcurrent; i++)
	{
		commands[i].Execute();
	}
}
/// <summary>
/// Retry the batch by splitting this node's keys among other nodes.
/// Returns false to fall back to the normal retry path when the split
/// yields the same single node; returns true once replacement commands
/// have been dispatched so the original command stops.
/// </summary>
protected internal override bool RetryBatch()
{
	List<BatchNode> nodes = null;

	try
	{
		// Splitting this node's keys among other nodes can cause an
		// exponential number of commands.
		nodes = GenerateBatchNodes();

		if (nodes.Count == 1 && nodes[0].node == batch.node)
		{
			// Same node: use the normal retry path, which reuses eventArgs,
			// so PutBackArgsOnError() must not be called here.
			return(false);
		}
	}
	catch (Exception)
	{
		// Release the original command's args before propagating.
		base.PutBackArgsOnError();
		throw;
	}

	// Close original command.
	base.PutBackArgsOnError();

	// Build one replacement command per batch node.
	AsyncMultiCommand[] replacements = new AsyncMultiCommand[nodes.Count];
	int index = 0;

	foreach (BatchNode node in nodes)
	{
		AsyncBatchCommand cmd = CreateCommand(node);
		cmd.sequenceAP = sequenceAP;
		cmd.sequenceSC = sequenceSC;
		cmd.SetBatchRetry(this);
		replacements[index++] = cmd;
	}

	// Retry new commands; returning true stops the original batch command.
	parent.Retry(replacements);
	return(true);
}
/// <summary>
/// Create one exists-sequence command per batch node and dispatch all of
/// them at once (concurrency limit 0 = unbounded).
/// </summary>
public AsyncBatchExistsSequenceExecutor
(
	AsyncCluster cluster,
	BatchPolicy policy,
	Key[] keys,
	ExistsSequenceListener listener
) : base(cluster, policy, keys, false)
{
	this.listener = listener;

	// One command per node that owns part of the key set.
	AsyncMultiCommand[] tasks = new AsyncMultiCommand[base.taskSize];

	for (int i = 0; i < batchNodes.Count; i++)
	{
		tasks[i] = new AsyncBatchExistsSequenceCommand(this, cluster, batchNodes[i], policy, keys, listener);
	}

	// Dispatch commands to nodes.
	Execute(tasks, 0);
}
/// <summary>
/// Create one exists-array command per batch node and dispatch all of them
/// at once (concurrency limit 0 = unbounded). Results are collected into
/// existsArray, indexed in key order.
/// </summary>
public AsyncBatchExistsArrayExecutor
(
	AsyncCluster cluster,
	BatchPolicy policy,
	Key[] keys,
	ExistsArrayListener listener
) : base(cluster, policy, keys, true)
{
	this.existsArray = new bool[keys.Length];
	this.listener = listener;

	// One command per node that owns part of the key set.
	AsyncMultiCommand[] tasks = new AsyncMultiCommand[base.taskSize];

	for (int i = 0; i < batchNodes.Count; i++)
	{
		tasks[i] = new AsyncBatchExistsArrayCommand(this, cluster, batchNodes[i], policy, keys, existsArray);
	}

	// Dispatch commands to nodes.
	Execute(tasks, 0);
}
/// <summary>
/// Create one batch-read-sequence command per owning node and dispatch all
/// of them at once (concurrency limit 0 = unbounded).
/// </summary>
public AsyncBatchReadSequenceExecutor
(
	AsyncCluster cluster,
	BatchPolicy policy,
	BatchSequenceListener listener,
	List<BatchRead> records
) : base(cluster, false)
{
	this.listener = listener;

	// Group the requested records by the node that owns them.
	List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records);
	AsyncMultiCommand[] tasks = new AsyncMultiCommand[batchNodes.Count];

	for (int i = 0; i < batchNodes.Count; i++)
	{
		tasks[i] = new AsyncBatchReadSequenceCommand(this, cluster, batchNodes[i], policy, listener, records);
	}

	// Dispatch commands to nodes.
	Execute(tasks, 0);
}
/// <summary>
/// Create one get-sequence command per batch node and dispatch all of them
/// at once (concurrency limit 0 = unbounded).
/// </summary>
public AsyncBatchGetSequenceExecutor
(
	AsyncCluster cluster,
	BatchPolicy policy,
	RecordSequenceListener listener,
	Key[] keys,
	string[] binNames,
	int readAttr
) : base(cluster, policy, keys)
{
	this.listener = listener;

	// One command per node that owns part of the key set.
	AsyncMultiCommand[] tasks = new AsyncMultiCommand[base.taskSize];
	int taskCount = 0;

	foreach (BatchNode batchNode in batchNodes)
	{
		tasks[taskCount++] = new AsyncBatchGetSequenceCommand(this, cluster, batchNode, policy, keys, binNames, listener, readAttr);
	}

	// Dispatch commands to nodes.
	Execute(tasks, 0);
}
/// <summary>
/// Create get-array commands, choosing the new batch index protocol per node
/// when supported and falling back to the old direct protocol (one command
/// per namespace) otherwise, then dispatch them honoring the policy's
/// concurrency limit. Results are collected into recordArray in key order.
/// </summary>
public AsyncBatchGetArrayExecutor
(
	AsyncCluster cluster,
	BatchPolicy policy,
	RecordArrayListener listener,
	Key[] keys,
	string[] binNames,
	int readAttr
) : base(cluster, policy, keys)
{
	this.recordArray = new Record[keys.Length];
	this.listener = listener;

	// Create commands.
	AsyncMultiCommand[] tasks = new AsyncMultiCommand[base.taskSize];
	int taskCount = 0;

	foreach (BatchNode batchNode in batchNodes)
	{
		if (batchNode.node.UseNewBatch(policy))
		{
			// Node supports the batch index protocol: one command per node.
			tasks[taskCount++] = new AsyncBatchGetArrayCommand(this, cluster, batchNode, policy, keys, binNames, recordArray, readAttr);
		}
		else
		{
			// Old batch only allows one namespace per call.
			foreach (BatchNode.BatchNamespace batchNamespace in batchNode.batchNamespaces)
			{
				tasks[taskCount++] = new AsyncBatchGetArrayDirect(this, cluster, (AsyncNode)batchNode.node, batchNamespace, policy, keys, binNames, recordArray, readAttr);
			}
		}
	}

	// Dispatch commands to nodes.
	Execute(tasks, policy.maxConcurrentThreads);
}
/// <summary>
/// Validate the cluster key on the command's server node before running
/// the command; the handler resumes execution when validation completes.
/// </summary>
private void ExecuteValidateCommand(AsyncMultiCommand command)
{
	NextHandler handler = new NextHandler(this, command);
	AsyncQueryValidate.Validate(cluster, handler, command.serverNode, ns, clusterKey);
}
/// <summary>
/// Capture the executor and the command to run after cluster-key validation.
/// </summary>
public NextHandler(AsyncMultiExecutor parent, AsyncMultiCommand command)
{
	this.command = command;
	this.parent = parent;
}
/// <summary>
/// Create exists-array commands, choosing the new batch index protocol per
/// node when supported and falling back to the old direct protocol (one
/// command per namespace) otherwise, then dispatch them honoring the
/// policy's concurrency limit.
/// </summary>
public AsyncBatchExistsArrayExecutor(
	AsyncCluster cluster,
	BatchPolicy policy,
	Key[] keys,
	ExistsArrayListener listener
) : base(cluster, policy, keys)
{
	this.existsArray = new bool[keys.Length];
	this.listener = listener;

	// Create commands.
	AsyncMultiCommand[] tasks = new AsyncMultiCommand[base.taskSize];
	int taskCount = 0;

	foreach (BatchNode batchNode in batchNodes)
	{
		if (batchNode.node.UseNewBatch(policy))
		{
			// Node supports the batch index protocol: one command per node.
			tasks[taskCount++] = new AsyncBatchExistsArrayCommand(this, cluster, batchNode, policy, keys, existsArray);
		}
		else
		{
			// Old batch only allows one namespace per call.
			foreach (BatchNode.BatchNamespace batchNamespace in batchNode.batchNamespaces)
			{
				tasks[taskCount++] = new AsyncBatchExistsArrayDirect(this, cluster, (AsyncNode)batchNode.node, batchNamespace, policy, keys, existsArray);
			}
		}
	}

	// Dispatch commands to nodes.
	Execute(tasks, policy.maxConcurrentThreads);
}
/// <summary>
/// Create one batch-read-sequence command per owning node and dispatch them,
/// honoring the policy's concurrency limit. Every target node must support
/// the batch index protocol.
/// </summary>
public AsyncBatchReadSequenceExecutor(
	AsyncCluster cluster,
	BatchPolicy policy,
	BatchSequenceListener listener,
	List<BatchRead> records
)
{
	this.listener = listener;

	// Group the requested records by the node that owns them.
	List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records);
	AsyncMultiCommand[] tasks = new AsyncMultiCommand[batchNodes.Count];

	for (int i = 0; i < batchNodes.Count; i++)
	{
		BatchNode batchNode = batchNodes[i];

		// Batch-read sequences require the batch index protocol on every node.
		if (!batchNode.node.hasBatchIndex)
		{
			throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Requested command requires a server that supports new batch index protocol.");
		}
		tasks[i] = new AsyncBatchReadSequenceCommand(this, cluster, batchNode, policy, listener, records);
	}

	// Dispatch commands to nodes.
	Execute(tasks, policy.maxConcurrentThreads);
}
/// <summary>
/// Create get-sequence commands, choosing the new batch index protocol per
/// node when supported and falling back to the old direct protocol (one
/// command per namespace) otherwise, then dispatch them honoring the
/// policy's concurrency limit.
/// </summary>
public AsyncBatchGetSequenceExecutor(
	AsyncCluster cluster,
	BatchPolicy policy,
	RecordSequenceListener listener,
	Key[] keys,
	string[] binNames,
	int readAttr
) : base(cluster, policy, keys)
{
	this.listener = listener;

	// Create commands.
	AsyncMultiCommand[] tasks = new AsyncMultiCommand[base.taskSize];
	int taskCount = 0;

	foreach (BatchNode batchNode in batchNodes)
	{
		if (batchNode.node.UseNewBatch(policy))
		{
			// Node supports the batch index protocol: one command per node.
			tasks[taskCount++] = new AsyncBatchGetSequenceCommand(this, cluster, batchNode, policy, keys, binNames, listener, readAttr);
		}
		else
		{
			// Old batch only allows one namespace per call.
			foreach (BatchNode.BatchNamespace batchNamespace in batchNode.batchNamespaces)
			{
				tasks[taskCount++] = new AsyncBatchGetSequenceDirect(this, cluster, (AsyncNode)batchNode.node, batchNamespace, policy, keys, binNames, listener, readAttr);
			}
		}
	}

	// Dispatch commands to nodes.
	Execute(tasks, policy.maxConcurrentThreads);
}
/// <summary>
/// Copy constructor: clone executor linkage, target node, and not-found
/// handling from another command (used when a command is re-created).
/// </summary>
public AsyncMultiCommand(AsyncMultiCommand other) : base(other)
{
	this.stopOnNotFound = other.stopOnNotFound;
	this.serverNode = other.serverNode;
	this.executor = other.executor;
}
/// <summary>
/// Copy constructor: clone parent executor linkage and not-found handling
/// from another command (used when a command is re-created).
/// </summary>
public AsyncMultiCommand(AsyncMultiCommand other) : base(other)
{
	this.stopOnNotFound = other.stopOnNotFound;
	this.parent = other.parent;
}