protected internal override bool RetryBatch(Cluster cluster, int socketTimeout, int totalTimeout, DateTime deadline, int iteration, int commandSentCounter)
{
	// A retry must redistribute this node's keys across the other cluster nodes,
	// since partition ownership may have moved. This is both recursive and exponential.
	List<BatchNode> splitNodes = BatchNode.GenerateList(cluster, policy, records, sequence, batch);

	if (splitNodes.Count == 1 && splitNodes[0].node == batch.node)
	{
		// All keys still map to the same single node. Fall back to the normal retry path.
		return false;
	}

	// Issue one sub-batch per node, sequentially on the current thread.
	foreach (BatchNode splitNode in splitNodes)
	{
		MultiCommand command = new BatchReadListCommand(parent, splitNode, policy, records);
		command.sequence = sequence;
		command.Execute(cluster, policy, null, splitNode.node, true, socketTimeout, totalTimeout, deadline, iteration, commandSentCounter);
	}
	return true;
}
//-------------------------------------------------------
// Batch Read Operations
//-------------------------------------------------------

/// <summary>
/// Read multiple records for the specified batch keys in a single batch call.
/// Each key in the batch may target a different namespace and request different bins.
/// Results are written back into the same <see cref="BatchRead"/> entries in the
/// supplied list; if a key is not found, that entry's record field is left null.
/// The policy controls timeouts and the maximum number of concurrent threads.
/// This method requires Aerospike Server version >= 3.6.0.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="records">list of unique record identifiers and the bins to retrieve.
/// The returned records are located in the same list.</param>
/// <exception cref="AerospikeException">if read fails</exception>
public void Get(BatchPolicy policy, List<BatchRead> records)
{
	// Nothing to request for an empty batch.
	if (records.Count == 0)
	{
		return;
	}

	policy = policy ?? batchPolicyDefault;

	List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records);

	if (policy.maxConcurrentThreads != 1 && batchNodes.Count > 1)
	{
		// Run batch requests in parallel in separate threads.
		//
		// Multiple threads write to the record list, so one might expect volatile
		// or memory barriers to be required on the write threads and this read
		// thread. They should not be necessary here because Executor performs a
		// volatile write (Interlocked.Increment(ref completedCount)) at the end of
		// each write thread and a synchronized WaitTillComplete() on this thread.
		Executor executor = new Executor(batchNodes.Count);

		foreach (BatchNode batchNode in batchNodes)
		{
			if (!batchNode.node.hasBatchIndex)
			{
				throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Requested command requires a server that supports new batch index protocol.");
			}
			executor.AddCommand(new BatchReadListCommand(batchNode, policy, records));
		}
		executor.Execute(policy.maxConcurrentThreads);
	}
	else
	{
		// Run batch requests sequentially in same thread.
		foreach (BatchNode batchNode in batchNodes)
		{
			if (!batchNode.node.hasBatchIndex)
			{
				throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Requested command requires a server that supports new batch index protocol.");
			}
			MultiCommand command = new BatchReadListCommand(batchNode, policy, records);
			command.Execute();
		}
	}
}