/// <summary>
/// Construct an async batch read-list command targeting one node's share of the batch.
/// </summary>
public AsyncBatchReadListCommand(
    AsyncMultiExecutor parent,
    AsyncCluster cluster,
    BatchNode batch,
    BatchPolicy batchPolicy,
    List<BatchRead> records
) : base(parent, cluster, batchPolicy, (AsyncNode)batch.node, false)
{
    this.records = records;
    this.batchPolicy = batchPolicy;
    this.batch = batch;
}
/// <summary>
/// Construct a sync batch exists command; results land in the caller-owned existsArray.
/// </summary>
public BatchExistsArrayCommand(
    BatchNode batch,
    BatchPolicy policy,
    Key[] keys,
    bool[] existsArray
) : base(batch.node, false)
{
    this.existsArray = existsArray;
    this.keys = keys;
    this.policy = policy;
    this.batch = batch;
}
/// <summary>
/// Construct an async batch exists command; results land in the caller-owned existsArray.
/// </summary>
public AsyncBatchExistsArrayCommand(
    AsyncMultiExecutor parent,
    AsyncCluster cluster,
    BatchNode batch,
    BatchPolicy batchPolicy,
    Key[] keys,
    bool[] existsArray
) : base(parent, cluster, batch, batchPolicy)
{
    this.existsArray = existsArray;
    this.keys = keys;
}
/// <summary>
/// Run one node's portion of the batch synchronously.
/// A get command is used when a records output array was supplied; otherwise exists.
/// </summary>
private static void ExecuteNode(Cluster cluster, BatchNode batchNode, BatchPolicy policy, Key[] keys, bool[] existsArray, Record[] records, string[] binNames, int readAttr)
{
    // Select command type from which output array the caller provided.
    MultiCommand command = (records != null)
        ? (MultiCommand)new BatchGetArrayCommand(null, batchNode, policy, keys, binNames, records, readAttr)
        : new BatchExistsArrayCommand(null, batchNode, policy, keys, existsArray);

    command.Execute(cluster, policy, true);
}
/// <summary>
/// Construct a sync batch exists command for a single batch node.
/// </summary>
public BatchExistsArrayCommand(
    BatchNode batch,
    BatchPolicy policy,
    Key[] keys,
    bool[] existsArray
) : base(batch.node, false)
{
    this.batch = batch;
    this.existsArray = existsArray;
    this.policy = policy;
    this.keys = keys;
}
/// <summary>
/// Construct an async exists command that streams each result to the listener.
/// </summary>
public AsyncBatchExistsSequenceCommand(
    AsyncMultiExecutor parent,
    AsyncCluster cluster,
    BatchNode batch,
    BatchPolicy batchPolicy,
    Key[] keys,
    ExistsSequenceListener listener
) : base(parent, cluster, batch, batchPolicy)
{
    this.listener = listener;
    this.keys = keys;
}
/// <summary>
/// Construct a sync batch exists command run under a thread-pool executor.
/// </summary>
public BatchExistsArrayCommand(
    Cluster cluster,
    Executor parent,
    BatchNode batch,
    BatchPolicy policy,
    Key[] keys,
    bool[] existsArray
) : base(cluster, parent, batch, policy)
{
    this.existsArray = existsArray;
    this.keys = keys;
}
/// <summary>
/// Construct an async batch read command that streams each record to the listener.
/// </summary>
public AsyncBatchReadSequenceCommand(
    AsyncMultiExecutor parent,
    AsyncCluster cluster,
    BatchNode batch,
    BatchPolicy batchPolicy,
    BatchSequenceListener listener,
    List<BatchRead> records
) : base(parent, cluster, batch, batchPolicy)
{
    this.records = records;
    this.listener = listener;
}
/// <summary>
/// Split keys among cluster nodes and record how many async commands must complete.
/// </summary>
public AsyncBatchExecutor(Cluster cluster, BatchPolicy policy, Key[] keys)
{
    this.keys = keys;
    this.batchNodes = BatchNode.GenerateList(cluster, policy, keys);

    // One asynchronous command is needed per node/namespace pair.
    int commandCount = 0;

    foreach (BatchNode batchNode in batchNodes)
    {
        commandCount += batchNode.batchNamespaces.Count;
    }
    completedSize = commandCount;
}
/// <summary>
/// Construct an async batch exists command bound directly to the batch node.
/// </summary>
public AsyncBatchExistsArrayCommand(
    AsyncMultiExecutor parent,
    AsyncCluster cluster,
    BatchNode batch,
    BatchPolicy policy,
    Key[] keys,
    bool[] existsArray
) : base(parent, cluster, (AsyncNode)batch.node, false)
{
    this.existsArray = existsArray;
    this.keys = keys;
    this.policy = policy;
    this.batch = batch;
}
/// <summary>
/// Construct a sync batch exists command tracked by an external Executor.
/// </summary>
public BatchExistsArrayCommand(
    Executor parent,
    BatchNode batch,
    BatchPolicy policy,
    Key[] keys,
    bool[] existsArray
) : base(false)
{
    this.existsArray = existsArray;
    this.keys = keys;
    this.policy = policy;
    this.batch = batch;
    this.parent = parent;
}
/// <summary>
/// Construct an async batch read command that streams results to the sequence listener.
/// </summary>
public AsyncBatchReadSequenceCommand(
    AsyncMultiExecutor parent,
    AsyncCluster cluster,
    BatchNode batch,
    BatchPolicy policy,
    BatchSequenceListener listener,
    List<BatchRead> records
) : base(parent, cluster, (AsyncNode)batch.node, false)
{
    this.records = records;
    this.listener = listener;
    this.policy = policy;
    this.batch = batch;
}
/// <summary>
/// Construct an async exists command that streams each key's result to the listener.
/// </summary>
public AsyncBatchExistsSequenceCommand(
    AsyncMultiExecutor parent,
    AsyncCluster cluster,
    BatchNode batch,
    BatchPolicy policy,
    Key[] keys,
    ExistsSequenceListener listener
) : base(parent, cluster, (AsyncNode)batch.node, false)
{
    this.listener = listener;
    this.keys = keys;
    this.policy = policy;
    this.batch = batch;
}
/// <summary>
/// Construct a sync batch get command; records are written into the caller-owned array.
/// </summary>
public BatchGetArrayCommand(
    Executor parent,
    BatchNode batch,
    BatchPolicy policy,
    Key[] keys,
    string[] binNames,
    Record[] records,
    int readAttr
) : base(parent, batch, policy)
{
    this.readAttr = readAttr;
    this.records = records;
    this.binNames = binNames;
    this.keys = keys;
}
/// <summary>
/// Re-split a retried batch's keys among the currently valid cluster nodes.
/// Replica selection honors the policy plus the AP/SC retry sequence counters.
/// </summary>
public static List<BatchNode> GenerateList(
    Cluster cluster,
    BatchPolicy policy,
    Key[] keys,
    uint sequenceAP,
    uint sequenceSC,
    BatchNode batchSeed
)
{
    Node[] nodes = cluster.ValidateNodes();

    // Initial per-node key capacity: average + 25%, with a floor of 10.
    int capacity = batchSeed.offsetsSize / nodes.Length;
    capacity += (int)((uint)capacity >> 2);

    if (capacity < 10)
    {
        capacity = 10;
    }

    Replica replica = policy.replica;
    Replica replicaSC = Partition.GetReplicaSC(policy);

    // Group the seed's key offsets by owning server node.
    List<BatchNode> result = new List<BatchNode>(nodes.Length);

    for (int i = 0; i < batchSeed.offsetsSize; i++)
    {
        int offset = batchSeed.offsets[i];
        Node target = Partition.GetNodeBatchRead(cluster, keys[offset], replica, replicaSC, sequenceAP, sequenceSC);
        BatchNode existing = FindBatchNode(result, target);

        if (existing != null)
        {
            existing.AddKey(offset);
        }
        else
        {
            result.Add(new BatchNode(target, capacity, offset));
        }
    }
    return result;
}
/// <summary>
/// Construct a sync batch get command bound directly to the batch node.
/// </summary>
public BatchGetArrayCommand(
    BatchNode batch,
    BatchPolicy policy,
    Key[] keys,
    string[] binNames,
    Record[] records,
    int readAttr
) : base(batch.node, false)
{
    this.readAttr = readAttr;
    this.records = records;
    this.binNames = binNames;
    this.keys = keys;
    this.policy = policy;
    this.batch = batch;
}
/// <summary>
/// Construct an async batch get command that streams each record to the listener.
/// </summary>
public AsyncBatchGetSequenceCommand(
    AsyncMultiExecutor parent,
    AsyncCluster cluster,
    BatchNode batch,
    BatchPolicy batchPolicy,
    Key[] keys,
    string[] binNames,
    RecordSequenceListener listener,
    int readAttr
) : base(parent, cluster, batch, batchPolicy)
{
    this.readAttr = readAttr;
    this.listener = listener;
    this.binNames = binNames;
    this.keys = keys;
}
/// <summary>
/// Construct an async batch get command; records land in the caller-owned array.
/// </summary>
public AsyncBatchGetArrayCommand(
    AsyncMultiExecutor parent,
    AsyncCluster cluster,
    BatchNode batch,
    BatchPolicy batchPolicy,
    Key[] keys,
    string[] binNames,
    Record[] records,
    int readAttr
) : base(parent, cluster, batch, batchPolicy)
{
    this.readAttr = readAttr;
    this.records = records;
    this.binNames = binNames;
    this.keys = keys;
}
/// <summary>
/// Split a BatchRead list among cluster nodes for an initial (non-retry) attempt.
/// </summary>
/// <exception cref="AerospikeException">if the cluster has no nodes</exception>
public static List<BatchNode> GenerateList(Cluster cluster, BatchPolicy policy, List<BatchRead> records)
{
    Node[] nodes = cluster.Nodes;

    if (nodes.Length == 0)
    {
        throw new AerospikeException(ResultCode.SERVER_NOT_AVAILABLE, "Command failed because cluster is empty.");
    }

    // Initial per-node key capacity: average + 25%, with a floor of 10.
    int recordCount = records.Count;
    int capacity = recordCount / nodes.Length;
    capacity += (int)((uint)capacity >> 2);

    if (capacity < 10)
    {
        capacity = 10;
    }

    Replica replica = policy.replica;
    Replica replicaSC = Partition.GetReplicaSC(policy);

    // Group record indexes by owning server node.
    List<BatchNode> result = new List<BatchNode>(nodes.Length);

    for (int i = 0; i < recordCount; i++)
    {
        Node target = Partition.GetNodeBatchRead(cluster, records[i].key, replica, replicaSC, 0, 0);
        BatchNode existing = FindBatchNode(result, target);

        if (existing != null)
        {
            existing.AddKey(i);
        }
        else
        {
            result.Add(new BatchNode(target, capacity, i));
        }
    }
    return result;
}
/// <summary>
/// Re-split a retried batch's keys among cluster nodes using the replica retry sequence.
/// </summary>
/// <exception cref="AerospikeException">if the cluster has no nodes</exception>
public static List<BatchNode> GenerateList(Cluster cluster, BatchPolicy policy, Key[] keys, uint sequence, BatchNode batchSeed)
{
    Node[] nodes = cluster.Nodes;

    if (nodes.Length == 0)
    {
        throw new AerospikeException(ResultCode.SERVER_NOT_AVAILABLE, "Command failed because cluster is empty.");
    }

    // Initial per-node key capacity: average + 25%, with a floor of 10.
    int capacity = batchSeed.offsetsSize / nodes.Length;
    capacity += (int)((uint)capacity >> 2);

    if (capacity < 10)
    {
        capacity = 10;
    }

    // Group the seed's key offsets by owning server node.
    List<BatchNode> result = new List<BatchNode>(nodes.Length);

    for (int i = 0; i < batchSeed.offsetsSize; i++)
    {
        int offset = batchSeed.offsets[i];
        Partition partition = new Partition(keys[offset]);
        Node target = GetNode(cluster, policy, partition, sequence);
        BatchNode existing = FindBatchNode(result, target);

        if (existing != null)
        {
            existing.AddKey(offset);
        }
        else
        {
            result.Add(new BatchNode(target, capacity, offset));
        }
    }
    return result;
}
/// <summary>
/// Retry this batch by re-splitting its records among current nodes.
/// Returns false when the split still maps entirely to the original node,
/// signaling the caller to use the normal single-command retry path.
/// </summary>
protected internal override bool RetryBatch(Cluster cluster, int socketTimeout, int totalTimeout, DateTime deadline, int iteration, int commandSentCounter)
{
    // Retry requires keys for this node to be split among other nodes.
    // This is both recursive and exponential.
    List<BatchNode> splitNodes = BatchNode.GenerateList(cluster, policy, records, sequence, batch);

    if (splitNodes.Count == 1 && splitNodes[0].node == batch.node)
    {
        // Batch node is the same. Go through normal retry.
        return false;
    }

    // Run the split batch requests sequentially in this thread.
    foreach (BatchNode splitNode in splitNodes)
    {
        MultiCommand command = new BatchReadListCommand(parent, splitNode, policy, records);
        command.sequence = sequence;
        command.Execute(cluster, policy, null, splitNode.node, true, socketTimeout, totalTimeout, deadline, iteration, commandSentCounter);
    }
    return true;
}
/// <summary>
/// Split a key array among cluster nodes based on each key's partition and replica policy.
/// </summary>
/// <exception cref="AerospikeException">if the cluster has no nodes</exception>
public static List<BatchNode> GenerateList(Cluster cluster, BatchPolicy policy, Key[] keys)
{
    Node[] nodes = cluster.Nodes;

    if (nodes.Length == 0)
    {
        throw new AerospikeException(ResultCode.SERVER_NOT_AVAILABLE, "Command failed because cluster is empty.");
    }

    // Initial per-node key capacity: average + 25%, with a floor of 10.
    int capacity = keys.Length / nodes.Length;
    capacity += (int)((uint)capacity >> 2);

    if (capacity < 10)
    {
        capacity = 10;
    }

    // Group key indexes by owning server node.
    List<BatchNode> result = new List<BatchNode>(nodes.Length);

    for (int i = 0; i < keys.Length; i++)
    {
        Partition partition = new Partition(keys[i]);
        Node target = cluster.GetReadNode(partition, policy.replica);
        BatchNode existing = FindBatchNode(result, target);

        if (existing != null)
        {
            existing.AddKey(i);
        }
        else
        {
            result.Add(new BatchNode(target, capacity, i));
        }
    }
    return result;
}
/// <summary>
/// Split a BatchRead list among validated cluster nodes for an initial attempt.
/// </summary>
public static List<BatchNode> GenerateList(Cluster cluster, BatchPolicy policy, List<BatchRead> records)
{
    Node[] nodes = cluster.ValidateNodes();

    // Initial per-node key capacity: average + 25%, with a floor of 10.
    int recordCount = records.Count;
    int capacity = recordCount / nodes.Length;
    capacity += (int)((uint)capacity >> 2);

    if (capacity < 10)
    {
        capacity = 10;
    }

    Replica replica = policy.replica;
    Replica replicaSC = Partition.GetReplicaSC(policy);

    // Group record indexes by owning server node.
    List<BatchNode> result = new List<BatchNode>(nodes.Length);

    for (int i = 0; i < recordCount; i++)
    {
        Node target = Partition.GetNodeBatchRead(cluster, records[i].key, replica, replicaSC, 0, 0);
        BatchNode existing = FindBatchNode(result, target);

        if (existing != null)
        {
            existing.AddKey(i);
        }
        else
        {
            result.Add(new BatchNode(target, capacity, i));
        }
    }
    return result;
}
/// <summary>
/// Build one async exists command per batch node and dispatch them all.
/// </summary>
public AsyncBatchExistsSequenceExecutor(
    AsyncCluster cluster,
    BatchPolicy policy,
    Key[] keys,
    ExistsSequenceListener listener
)
{
    this.listener = listener;

    // Create one command per node owning part of the batch.
    List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys);
    AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count];

    for (int i = 0; i < commands.Length; i++)
    {
        commands[i] = new AsyncBatchExistsSequenceCommand(this, cluster, batchNodes[i], policy, keys, listener);
    }
    // Dispatch commands to nodes.
    Execute(commands);
}
/// <summary>
/// Build one async read-sequence command per batch node and dispatch them all.
/// </summary>
public AsyncBatchReadSequenceExecutor(
    AsyncCluster cluster,
    BatchPolicy policy,
    BatchSequenceListener listener,
    List<BatchRead> records
) : base(cluster, false)
{
    this.listener = listener;

    // Create one command per node owning part of the batch.
    List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records);
    AsyncMultiCommand[] tasks = new AsyncMultiCommand[batchNodes.Count];

    for (int i = 0; i < tasks.Length; i++)
    {
        tasks[i] = new AsyncBatchReadSequenceCommand(this, cluster, batchNodes[i], policy, listener, records);
    }
    // Dispatch commands to nodes.
    Execute(tasks, 0);
}
/// <summary>
/// Split keys among cluster nodes and count the async commands required,
/// accounting for old-protocol nodes needing one command per namespace.
/// </summary>
public AsyncBatchExecutor(Cluster cluster, BatchPolicy policy, Key[] keys)
{
    this.keys = keys;
    this.batchNodes = BatchNode.GenerateList(cluster, policy, keys);

    // Count number of asynchronous commands needed.
    int commandCount = 0;

    foreach (BatchNode batchNode in batchNodes)
    {
        if (!batchNode.node.UseNewBatch(policy))
        {
            // Old batch only allows one namespace per call.
            batchNode.SplitByNamespace(keys);
            commandCount += batchNode.batchNamespaces.Count;
        }
        else
        {
            // New batch: a single command covers the whole node.
            commandCount++;
        }
    }
    this.taskSize = commandCount;
}
/// <summary>
/// Run one node's portion of the batch synchronously, choosing the new or old
/// wire protocol based on node capability; get vs. exists depends on which
/// output array was supplied.
/// </summary>
private static void ExecuteNode(Cluster cluster, BatchNode batchNode, BatchPolicy policy, Key[] keys, bool[] existsArray, Record[] records, string[] binNames, int readAttr)
{
    if (batchNode.node.UseNewBatch(policy))
    {
        // New batch protocol: one command handles the whole node.
        MultiCommand command = (records != null)
            ? (MultiCommand)new BatchGetArrayCommand(batchNode, policy, keys, binNames, records, readAttr)
            : new BatchExistsArrayCommand(batchNode, policy, keys, existsArray);
        command.Execute(cluster, policy, null, batchNode.node, true);
        return;
    }

    // Old batch only allows one namespace per call.
    batchNode.SplitByNamespace(keys);

    foreach (BatchNode.BatchNamespace batchNamespace in batchNode.batchNamespaces)
    {
        MultiCommand command = (records != null)
            ? (MultiCommand)new BatchGetArrayDirect(batchNamespace, policy, keys, binNames, records, readAttr)
            : new BatchExistsArrayDirect(batchNamespace, policy, keys, existsArray);
        command.Execute(cluster, policy, null, batchNode.node, true);
    }
}
/// <summary>
/// Build one async read-list command per batch node and dispatch them all;
/// the listener receives the completed record list.
/// </summary>
public AsyncBatchReadListExecutor(
    AsyncCluster cluster,
    BatchPolicy policy,
    BatchListListener listener,
    List<BatchRead> records
)
{
    this.records = records;
    this.listener = listener;

    // Create one command per node owning part of the batch.
    List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records);
    AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count];

    for (int i = 0; i < commands.Length; i++)
    {
        commands[i] = new AsyncBatchReadListCommand(this, cluster, batchNodes[i], policy, records);
    }
    // Dispatch commands to nodes.
    Execute(commands);
}
/// <summary>
/// Construct an async batch read command bound to the batch node; results
/// stream to the sequence listener.
/// </summary>
public AsyncBatchReadSequenceCommand(
    AsyncMultiExecutor parent,
    AsyncCluster cluster,
    BatchNode batch,
    BatchPolicy policy,
    BatchSequenceListener listener,
    List<BatchRead> records
) : base(parent, cluster, (AsyncNode)batch.node, false)
{
    this.batch = batch;
    this.records = records;
    this.listener = listener;
    this.policy = policy;
}
/// <summary>
/// Execute a batch get or exists request across the cluster.
/// Chooses a single random node when prole reads are allowed, otherwise splits
/// keys by owning node and runs sequentially or in parallel depending on
/// maxConcurrentThreads. Per node, the new batch-index protocol is used when the
/// node supports it; otherwise one command per namespace (old protocol).
/// </summary>
/// <param name="records">output array for gets; null means an exists request writing to existsArray</param>
public static void Execute(Cluster cluster, BatchPolicy policy, Key[] keys, bool[] existsArray, Record[] records, string[] binNames, int readAttr)
{
    if (keys.Length == 0)
    {
        // Nothing to do for an empty key set.
        return;
    }

    if (policy.allowProleReads)
    {
        // Send all requests to a single node chosen in round-robin fashion in this transaction thread.
        Node node = cluster.GetRandomNode();
        BatchNode batchNode = new BatchNode(node, keys);
        ExecuteNode(batchNode, policy, keys, existsArray, records, binNames, readAttr);
        return;
    }

    List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys);

    if (policy.maxConcurrentThreads == 1 || batchNodes.Count <= 1)
    {
        // Run batch requests sequentially in same thread.
        foreach (BatchNode batchNode in batchNodes)
        {
            ExecuteNode(batchNode, policy, keys, existsArray, records, binNames, readAttr);
        }
    }
    else
    {
        // Run batch requests in parallel in separate threads.
        //
        // Multiple threads write to the record/exists array, so one might think that
        // volatile or memory barriers are needed on the write threads and this read thread.
        // This should not be necessary here because it happens in Executor which does a
        // volatile write (Interlocked.Increment(ref completedCount)) at the end of write threads
        // and a synchronized WaitTillComplete() in this thread.
        Executor executor = new Executor(batchNodes.Count * 2);

        // Initialize threads.
        foreach (BatchNode batchNode in batchNodes)
        {
            if (batchNode.node.UseNewBatch(policy))
            {
                // New batch
                if (records != null)
                {
                    MultiCommand command = new BatchGetArrayCommand(batchNode, policy, keys, binNames, records, readAttr);
                    executor.AddCommand(command);
                }
                else
                {
                    MultiCommand command = new BatchExistsArrayCommand(batchNode, policy, keys, existsArray);
                    executor.AddCommand(command);
                }
            }
            else
            {
                // There may be multiple threads for a single node because the
                // wire protocol only allows one namespace per command. Multiple namespaces
                // require multiple threads per node.
                batchNode.SplitByNamespace(keys);

                foreach (BatchNode.BatchNamespace batchNamespace in batchNode.batchNamespaces)
                {
                    if (records != null)
                    {
                        MultiCommand command = new BatchGetArrayDirect(batchNode.node, batchNamespace, policy, keys, binNames, records, readAttr);
                        executor.AddCommand(command);
                    }
                    else
                    {
                        MultiCommand command = new BatchExistsArrayDirect(batchNode.node, batchNamespace, policy, keys, existsArray);
                        executor.AddCommand(command);
                    }
                }
            }
        }
        executor.Execute(policy.maxConcurrentThreads);
    }
}
/// <summary>
/// Run one node's portion of the batch synchronously. The command protocol
/// (new batch-index vs. old per-namespace) is chosen from node capability;
/// get vs. exists depends on which output array was supplied.
/// </summary>
private static void ExecuteNode(BatchNode batchNode, BatchPolicy policy, Key[] keys, bool[] existsArray, Record[] records, string[] binNames, int readAttr)
{
    if (batchNode.node.UseNewBatch(policy))
    {
        // New batch protocol: one command handles the whole node.
        MultiCommand command = (records != null)
            ? (MultiCommand)new BatchGetArrayCommand(batchNode, policy, keys, binNames, records, readAttr)
            : new BatchExistsArrayCommand(batchNode, policy, keys, existsArray);
        command.Execute();
        return;
    }

    // Old batch only allows one namespace per call.
    batchNode.SplitByNamespace(keys);

    foreach (BatchNode.BatchNamespace batchNamespace in batchNode.batchNamespaces)
    {
        MultiCommand command = (records != null)
            ? (MultiCommand)new BatchGetArrayDirect(batchNode.node, batchNamespace, policy, keys, binNames, records, readAttr)
            : new BatchExistsArrayDirect(batchNode.node, batchNamespace, policy, keys, existsArray);
        command.Execute();
    }
}
/// <summary>
/// Serialize a batch-index read request for one node's key offsets into the
/// command buffer. Two passes: first estimates the buffer size, then writes
/// the wire format. Consecutive keys sharing a namespace (and set name, when
/// policy.sendSetName) emit a 1-byte "repeat" marker instead of a full row header.
/// </summary>
public void SetBatchRead(BatchPolicy policy, Key[] keys, BatchNode batch, string[] binNames, int readAttr)
{
    // Estimate full row size
    int[] offsets = batch.offsets;
    int max = batch.offsetsSize;
    // Field count per row: namespace, plus set name when policy requests it.
    ushort fieldCount = policy.sendSetName ? (ushort)2 : (ushort)1;

    // Calculate size of bin names.
    int binNameSize = 0;
    int operationCount = 0;

    if (binNames != null)
    {
        foreach (string binName in binNames)
        {
            binNameSize += ByteUtil.EstimateSizeUtf8(binName) + OPERATION_HEADER_SIZE;
        }
        operationCount = binNames.Length;
    }

    // Estimate buffer size.
    Begin();
    dataOffset += FIELD_HEADER_SIZE + 5;

    Key prev = null;

    for (int i = 0; i < max; i++)
    {
        Key key = keys[offsets[i]];
        dataOffset += key.digest.Length + 4;

        // Try reference equality in hope that namespace for all keys is set from a fixed variable.
        if (prev != null && prev.ns == key.ns && (!policy.sendSetName || prev.setName == key.setName))
        {
            // Can set repeat previous namespace/bin names to save space.
            dataOffset++;
        }
        else
        {
            // Estimate full header, namespace and bin names.
            dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6;

            if (policy.sendSetName)
            {
                dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
            }
            dataOffset += binNameSize;
            prev = key;
        }
    }
    SizeBuffer();

    if (policy.consistencyLevel == ConsistencyLevel.CONSISTENCY_ALL)
    {
        readAttr |= Command.INFO1_CONSISTENCY_ALL;
    }

    WriteHeader(policy, readAttr | Command.INFO1_BATCH, 0, 1, 0);
    // Remember where the batch field header starts; its size is patched after writing.
    int fieldSizeOffset = dataOffset;
    WriteFieldHeader(0, policy.sendSetName ? FieldType.BATCH_INDEX_WITH_SET : FieldType.BATCH_INDEX); // Need to update size at end

    ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
    dataOffset += 4;
    dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0;
    prev = null;

    // Second pass: write one row per key offset.
    for (int i = 0; i < max; i++)
    {
        int index = offsets[i];
        ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset);
        dataOffset += 4;

        Key key = keys[index];
        byte[] digest = key.digest;
        Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
        dataOffset += digest.Length;

        // Try reference equality in hope that namespace for all keys is set from a fixed variable.
        if (prev != null && prev.ns == key.ns && (!policy.sendSetName || prev.setName == key.setName))
        {
            // Can set repeat previous namespace/bin names to save space.
            dataBuffer[dataOffset++] = 1; // repeat
        }
        else
        {
            // Write full header, namespace and bin names.
            dataBuffer[dataOffset++] = 0; // do not repeat
            dataBuffer[dataOffset++] = (byte)readAttr;
            dataOffset += ByteUtil.ShortToBytes(fieldCount, dataBuffer, dataOffset);
            dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
            WriteField(key.ns, FieldType.NAMESPACE);

            if (policy.sendSetName)
            {
                WriteField(key.setName, FieldType.TABLE);
            }

            if (binNames != null)
            {
                foreach (string binName in binNames)
                {
                    WriteOperation(binName, Operation.Type.READ);
                }
            }
            prev = key;
        }
    }
    // Write real field size.
    ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
    End();
}
/// <summary>
/// Construct an old-protocol (per-namespace) sync batch get command.
/// </summary>
public BatchGetArrayDirect(
    Node node,
    BatchNode.BatchNamespace batch,
    Policy policy,
    Key[] keys,
    string[] binNames,
    Record[] records,
    int readAttr
) : base(node, false)
{
    this.readAttr = readAttr;
    this.records = records;
    this.binNames = binNames;
    this.keys = keys;
    this.policy = policy;
    this.batch = batch;
}
/// <summary>
/// Construct an async exists command bound to the batch node; results stream
/// to the sequence listener.
/// </summary>
public AsyncBatchExistsSequenceCommand(
    AsyncMultiExecutor parent,
    AsyncCluster cluster,
    BatchNode batch,
    BatchPolicy policy,
    Key[] keys,
    ExistsSequenceListener listener
) : base(parent, cluster, (AsyncNode)batch.node, false)
{
    this.batch = batch;
    this.listener = listener;
    this.keys = keys;
    this.policy = policy;
}
/// <summary>
/// Serialize a batch-index read request for a BatchRead list into the command
/// buffer. Two passes: size estimation, then wire-format writing. Consecutive
/// records sharing a namespace, bin-name array reference and readAllBins flag
/// emit a 1-byte "repeat" marker instead of a full row header.
/// </summary>
public void SetBatchRead(BatchPolicy policy, List<BatchRead> records, BatchNode batch)
{
    // Estimate full row size
    int[] offsets = batch.offsets;
    int max = batch.offsetsSize;
    BatchRead prev = null;

    Begin();
    dataOffset += FIELD_HEADER_SIZE + 5;

    for (int i = 0; i < max; i++)
    {
        BatchRead record = records[offsets[i]];
        Key key = record.key;
        string[] binNames = record.binNames;

        dataOffset += key.digest.Length + 4;

        // Avoid relatively expensive full equality checks for performance reasons.
        // Use reference equality only in hope that common namespaces/bin names are set from
        // fixed variables. It's fine if equality not determined correctly because it just
        // results in more space used. The batch will still be correct.
        if (prev != null && prev.key.ns == key.ns && prev.binNames == binNames && prev.readAllBins == record.readAllBins)
        {
            // Can set repeat previous namespace/bin names to save space.
            dataOffset++;
        }
        else
        {
            // Estimate full header, namespace and bin names.
            dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6;

            if (binNames != null)
            {
                foreach (string binName in binNames)
                {
                    EstimateOperationSize(binName);
                }
            }
            prev = record;
        }
    }
    SizeBuffer();

    WriteHeader(policy, Command.INFO1_READ | Command.INFO1_BATCH, 0, 1, 0);
    // Remember where the batch field header starts; its size is patched after writing.
    int fieldSizeOffset = dataOffset;
    WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end

    ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
    dataOffset += 4;
    dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0;
    prev = null;

    // Second pass: write one row per record offset.
    for (int i = 0; i < max; i++)
    {
        int index = offsets[i];
        ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset);
        dataOffset += 4;

        BatchRead record = records[index];
        Key key = record.key;
        string[] binNames = record.binNames;
        byte[] digest = key.digest;
        Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
        dataOffset += digest.Length;

        // Avoid relatively expensive full equality checks for performance reasons.
        // Use reference equality only in hope that common namespaces/bin names are set from
        // fixed variables. It's fine if equality not determined correctly because it just
        // results in more space used. The batch will still be correct.
        if (prev != null && prev.key.ns == key.ns && prev.binNames == binNames && prev.readAllBins == record.readAllBins)
        {
            // Can set repeat previous namespace/bin names to save space.
            dataBuffer[dataOffset++] = 1; // repeat
        }
        else
        {
            // Write full header, namespace and bin names.
            dataBuffer[dataOffset++] = 0; // do not repeat

            if (binNames != null && binNames.Length != 0)
            {
                // Specific bins requested: one READ operation per bin name.
                dataBuffer[dataOffset++] = (byte)Command.INFO1_READ;
                dataBuffer[dataOffset++] = 0; // pad
                dataBuffer[dataOffset++] = 0; // pad
                ByteUtil.ShortToBytes((ushort)binNames.Length, dataBuffer, dataOffset);
                dataOffset += 2;
                WriteField(key.ns, FieldType.NAMESPACE);

                foreach (string binName in binNames)
                {
                    WriteOperation(binName, Operation.Type.READ);
                }
            }
            else
            {
                // No bin list: read all bins or header only, per readAllBins.
                dataBuffer[dataOffset++] = (byte)(Command.INFO1_READ | (record.readAllBins ? Command.INFO1_GET_ALL : Command.INFO1_NOBINDATA));
                dataBuffer[dataOffset++] = 0; // pad
                dataBuffer[dataOffset++] = 0; // pad
                ByteUtil.ShortToBytes(0, dataBuffer, dataOffset);
                dataOffset += 2;
                WriteField(key.ns, FieldType.NAMESPACE);
            }
            prev = record;
        }
    }
    // Write real field size.
    ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
    End();
}
/// <summary>
/// Serialize an old-protocol batch read request: a single namespace field,
/// a digest array for all keys in that namespace, and optional READ operations
/// for each requested bin name.
/// </summary>
public void SetBatchReadDirect(Policy policy, Key[] keys, BatchNode.BatchNamespace batch, string[] binNames, int readAttr)
{
    // Estimate buffer size
    Begin();
    int byteSize = batch.offsetsSize * SyncCommand.DIGEST_SIZE;

    dataOffset += ByteUtil.EstimateSizeUtf8(batch.ns) + FIELD_HEADER_SIZE + byteSize + FIELD_HEADER_SIZE;

    if (binNames != null)
    {
        foreach (string binName in binNames)
        {
            EstimateOperationSize(binName);
        }
    }
    SizeBuffer();

    int operationCount = (binNames == null)? 0 : binNames.Length;
    WriteHeader(policy, readAttr, 0, 2, operationCount);
    WriteField(batch.ns, FieldType.NAMESPACE);
    WriteFieldHeader(byteSize, FieldType.DIGEST_RIPE_ARRAY);

    int[] offsets = batch.offsets;
    int max = batch.offsetsSize;

    // Write the digest of every key belonging to this namespace.
    for (int i = 0; i < max; i++)
    {
        Key key = keys[offsets[i]];
        byte[] digest = key.digest;
        Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
        dataOffset += digest.Length;
    }

    if (binNames != null)
    {
        foreach (string binName in binNames)
        {
            WriteOperation(binName, Operation.Type.READ);
        }
    }
    End();
}
/// <summary>
/// Serialize a batch-index read request (variant without set-name support).
/// Size estimation uses a fixed worst-case row size rather than per-key UTF-8
/// measurement; consecutive keys in the same namespace emit a "repeat" marker.
/// </summary>
public void SetBatchRead(BatchPolicy policy, Key[] keys, BatchNode batch, string[] binNames, int readAttr)
{
    // Estimate full row size
    int[] offsets = batch.offsets;
    int max = batch.offsetsSize;
    int rowSize = 30 + FIELD_HEADER_SIZE + 31; // Row's header(30) + max namespace(31).
    int operationCount = 0;

    if (binNames != null)
    {
        foreach (string binName in binNames)
        {
            EstimateOperationSize(binName);
        }
        // NOTE(review): EstimateOperationSize appears to accumulate into dataOffset
        // before Begin() starts the real estimate — confirm against Command base class.
        rowSize += dataOffset;
        operationCount = binNames.Length;
    }

    // Estimate buffer size.
    Begin();
    dataOffset += FIELD_HEADER_SIZE + 5;

    string prevNamespace = null;

    for (int i = 0; i < max; i++)
    {
        Key key = keys[offsets[i]];

        // Try reference equality in hope that namespace for all keys is set from a fixed variable.
        if (key.ns == prevNamespace || (prevNamespace != null && prevNamespace.Equals(key.ns)))
        {
            // Can set repeat previous namespace/bin names to save space.
            dataOffset += 25;
        }
        else
        {
            // Must write full header and namespace/bin names.
            dataOffset += rowSize;
            prevNamespace = key.ns;
        }
    }
    SizeBuffer();
    WriteHeader(policy, readAttr | Command.INFO1_BATCH, 0, 1, 0);
    // Remember where the batch field header starts; its size is patched after writing.
    int fieldSizeOffset = dataOffset;
    WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
    ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
    dataOffset += 4;
    dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0;
    prevNamespace = null;

    // Second pass: write one row per key offset.
    for (int i = 0; i < max; i++)
    {
        int index = offsets[i];
        ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset);
        dataOffset += 4;

        Key key = keys[index];
        byte[] digest = key.digest;
        Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
        dataOffset += digest.Length;

        // Try reference equality in hope that namespace for all keys is set from a fixed variable.
        if (key.ns == prevNamespace || (prevNamespace != null && prevNamespace.Equals(key.ns)))
        {
            // Can set repeat previous namespace/bin names to save space.
            dataBuffer[dataOffset++] = 1; // repeat
        }
        else
        {
            // Write full header, namespace and bin names.
            dataBuffer[dataOffset++] = 0; // do not repeat
            dataBuffer[dataOffset++] = (byte)readAttr;
            dataBuffer[dataOffset++] = 0; // pad
            dataBuffer[dataOffset++] = 0; // pad
            ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
            dataOffset += 2;
            WriteField(key.ns, FieldType.NAMESPACE);

            if (binNames != null)
            {
                foreach (string binName in binNames)
                {
                    WriteOperation(binName, Operation.Type.READ);
                }
            }
            prevNamespace = key.ns;
        }
    }
    // Write real field size.
    ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
    End();
}
/// <summary>
/// Construct an old-protocol (per-namespace) sync batch exists command.
/// </summary>
public BatchExistsArrayDirect(
    Node node,
    BatchNode.BatchNamespace batch,
    Policy policy,
    Key[] keys,
    bool[] existsArray
) : base(node, false)
{
    this.existsArray = existsArray;
    this.keys = keys;
    this.policy = policy;
    this.batch = batch;
}
/// <summary>
/// Construct a sync batch get command for a single batch node.
/// </summary>
public BatchGetArrayCommand(
    BatchNode batch,
    BatchPolicy policy,
    Key[] keys,
    string[] binNames,
    Record[] records,
    int readAttr
) : base(batch.node, false)
{
    this.batch = batch;
    this.readAttr = readAttr;
    this.records = records;
    this.binNames = binNames;
    this.keys = keys;
    this.policy = policy;
}
/// <summary>
/// Construct an async batch exists command bound to the batch node.
/// </summary>
public AsyncBatchExistsArrayCommand(
    AsyncMultiExecutor parent,
    AsyncCluster cluster,
    BatchNode batch,
    BatchPolicy policy,
    Key[] keys,
    bool[] existsArray
) : base(parent, cluster, (AsyncNode)batch.node, false)
{
    this.batch = batch;
    this.existsArray = existsArray;
    this.keys = keys;
    this.policy = policy;
}
/// <summary>
/// Construct an old-protocol (per-namespace) async batch exists command.
/// </summary>
public AsyncBatchExistsArrayDirect(
    AsyncMultiExecutor parent,
    AsyncCluster cluster,
    AsyncNode node,
    BatchNode.BatchNamespace batch,
    Policy policy,
    Key[] keys,
    bool[] existsArray
) : base(parent, cluster, node, false)
{
    this.existsArray = existsArray;
    this.keys = keys;
    this.policy = policy;
    this.batch = batch;
}
/// <summary>
/// Construct an async batch get command bound to the batch node.
/// </summary>
public AsyncBatchGetArrayCommand(
    AsyncMultiExecutor parent,
    AsyncCluster cluster,
    BatchNode batch,
    BatchPolicy policy,
    Key[] keys,
    string[] binNames,
    Record[] records,
    int readAttr
) : base(parent, cluster, (AsyncNode)batch.node, false)
{
    this.batch = batch;
    this.readAttr = readAttr;
    this.records = records;
    this.binNames = binNames;
    this.keys = keys;
    this.policy = policy;
}
/// <summary>
/// Construct an old-protocol async exists command that streams results to the listener.
/// </summary>
public AsyncBatchExistsSequenceDirect(
    AsyncMultiExecutor parent,
    AsyncCluster cluster,
    AsyncNode node,
    BatchNode.BatchNamespace batch,
    Policy policy,
    Key[] keys,
    ExistsSequenceListener listener
) : base(parent, cluster, node, false)
{
    this.listener = listener;
    this.keys = keys;
    this.policy = policy;
    this.batch = batch;
}
/// <summary>
/// Execute a batch get or exists request using the old per-namespace wire
/// protocol. Routes everything to one random node when prole reads are
/// allowed; otherwise splits keys by node and runs one command per
/// node/namespace pair, sequentially or via a thread-pool Executor.
/// </summary>
/// <param name="records">output array for gets; null means an exists request writing to existsArray</param>
public static void Execute(Cluster cluster, BatchPolicy policy, Key[] keys, bool[] existsArray, Record[] records, HashSet<string> binNames, int readAttr)
{
    if (keys.Length == 0)
    {
        // Nothing to do for an empty key set.
        return;
    }

    if (policy.allowProleReads)
    {
        // Send all requests to a single node chosen in round-robin fashion in this transaction thread.
        Node node = cluster.GetRandomNode();

        if (records != null)
        {
            BatchCommandNodeGet command = new BatchCommandNodeGet(node, policy, keys, records, binNames, readAttr);
            command.Execute();
        }
        else
        {
            BatchCommandNodeExists command = new BatchCommandNodeExists(node, policy, keys, existsArray);
            command.Execute();
        }
        return;
    }

    List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys);

    if (policy.maxConcurrentThreads == 1)
    {
        // Run batch requests sequentially in same thread.
        foreach (BatchNode batchNode in batchNodes)
        {
            foreach (BatchNode.BatchNamespace batchNamespace in batchNode.batchNamespaces)
            {
                if (records != null)
                {
                    BatchCommandGet command = new BatchCommandGet(batchNode.node, batchNamespace, policy, keys, binNames, records, readAttr);
                    command.Execute();
                }
                else
                {
                    BatchCommandExists command = new BatchCommandExists(batchNode.node, batchNamespace, policy, keys, existsArray);
                    command.Execute();
                }
            }
        }
    }
    else
    {
        // Run batch requests in parallel in separate threads.
        Executor executor = new Executor(batchNodes.Count * 2);

        // Initialize threads. There may be multiple threads for a single node because the
        // wire protocol only allows one namespace per command. Multiple namespaces
        // require multiple threads per node.
        foreach (BatchNode batchNode in batchNodes)
        {
            foreach (BatchNode.BatchNamespace batchNamespace in batchNode.batchNamespaces)
            {
                if (records != null)
                {
                    MultiCommand command = new BatchCommandGet(batchNode.node, batchNamespace, policy, keys, binNames, records, readAttr);
                    executor.AddCommand(command);
                }
                else
                {
                    MultiCommand command = new BatchCommandExists(batchNode.node, batchNamespace, policy, keys, existsArray);
                    executor.AddCommand(command);
                }
            }
        }
        executor.Execute(policy.maxConcurrentThreads);
    }
}
/// <summary>
/// Create an async direct batch get command that delivers each record to the
/// listener as it arrives, restricted to one node/namespace sub-batch.
/// </summary>
/// <param name="parent">executor coordinating all sub-batch commands</param>
/// <param name="cluster">async cluster</param>
/// <param name="node">target node for this sub-batch</param>
/// <param name="batch">namespace sub-batch (key offsets for this node)</param>
/// <param name="policy">read policy</param>
/// <param name="keys">full key array; batch holds offsets into it</param>
/// <param name="binNames">bin names to retrieve, or null for all bins</param>
/// <param name="listener">callback receiving each record as it streams in</param>
/// <param name="readAttr">read attribute flags for the command header</param>
public AsyncBatchGetSequenceDirect
(
	AsyncMultiExecutor parent,
	AsyncCluster cluster,
	AsyncNode node,
	BatchNode.BatchNamespace batch,
	Policy policy,
	Key[] keys,
	string[] binNames,
	RecordSequenceListener listener,
	int readAttr
) : base(parent, cluster, node, false)
{
	this.policy = policy;
	this.batch = batch;
	this.keys = keys;
	this.binNames = binNames;
	this.readAttr = readAttr;
	this.listener = listener;
}
/// <summary>
/// Create a synchronous batch-read command for one node's sub-batch.
/// Results are written back into the BatchRead entries in <paramref name="records"/>.
/// </summary>
/// <param name="batch">node sub-batch (offsets into records)</param>
/// <param name="policy">batch policy</param>
/// <param name="records">complete list of batch read requests/results</param>
public BatchReadListCommand(BatchNode batch, BatchPolicy policy, List<BatchRead> records)
	: base(batch.node, false)
{
	this.policy = policy;
	this.records = records;
	this.batch = batch;
}
/// <summary>
/// Execute a batch get/exists request using the batch index protocol.
/// When <paramref name="records"/> is non-null the request is a get whose results
/// fill that array; otherwise it is an exists check filling <paramref name="existsArray"/>.
/// </summary>
/// <param name="cluster">cluster used for node selection</param>
/// <param name="policy">batch policy (allowProleReads, maxConcurrentThreads)</param>
/// <param name="keys">keys to read; results land at matching indexes in the output array</param>
/// <param name="existsArray">output for exists results (used when records is null)</param>
/// <param name="records">output for record results, or null for an exists-only request</param>
/// <param name="binNames">bin names to retrieve, or null for all bins</param>
/// <param name="readAttr">read attribute flags written into the command header</param>
public static void Execute
(
	Cluster cluster,
	BatchPolicy policy,
	Key[] keys,
	bool[] existsArray,
	Record[] records,
	string[] binNames,
	int readAttr
)
{
	if (keys.Length == 0)
	{
		return;
	}

	if (policy.allowProleReads)
	{
		// Send all requests to a single node chosen in round-robin fashion in this transaction thread.
		BatchNode single = new BatchNode(cluster.GetRandomNode(), keys);
		ExecuteNode(cluster, single, policy, keys, existsArray, records, binNames, readAttr);
		return;
	}

	List<BatchNode> nodeBatches = BatchNode.GenerateList(cluster, policy, keys);

	if (policy.maxConcurrentThreads == 1 || nodeBatches.Count <= 1)
	{
		// Concurrency disabled or only one target node: run each sub-batch on this thread.
		foreach (BatchNode nb in nodeBatches)
		{
			ExecuteNode(cluster, nb, policy, keys, existsArray, records, binNames, readAttr);
		}
		return;
	}

	// Run batch requests in parallel in separate threads.
	//
	// Multiple threads write to the record/exists array, so one might think that
	// volatile or memory barriers are needed on the write threads and this read thread.
	// This should not be necessary here because it happens in Executor which does a
	// volatile write (Interlocked.Increment(ref completedCount)) at the end of write threads
	// and a synchronized WaitTillComplete() in this thread.
	Executor executor = new Executor(nodeBatches.Count * 2);

	foreach (BatchNode nb in nodeBatches)
	{
		if (records != null)
		{
			executor.AddCommand(new BatchGetArrayCommand(cluster, executor, nb, policy, keys, binNames, records, readAttr));
		}
		else
		{
			executor.AddCommand(new BatchExistsArrayCommand(cluster, executor, nb, policy, keys, existsArray));
		}
	}
	executor.Execute(policy.maxConcurrentThreads);
}
/// <summary>
/// Serialize a batch index read request for one node's sub-batch into the command buffer.
/// Pass one: estimate the buffer size. Pass two: write the wire bytes.
/// Consecutive keys sharing a namespace (and set name, when sendSetName is true) are
/// encoded with a 1-byte "repeat" marker instead of a full header to save space.
/// </summary>
/// <param name="policy">batch policy (sendSetName, allowInline)</param>
/// <param name="keys">full key array; batch holds offsets into it</param>
/// <param name="batch">node sub-batch: offsets/offsetsSize select the keys to encode</param>
/// <param name="binNames">bin names to read, or null for all bins</param>
/// <param name="readAttr">read attribute flags for the per-key headers</param>
public void SetBatchRead(BatchPolicy policy, Key[] keys, BatchNode batch, string[] binNames, int readAttr)
{
	// Estimate full row size
	int[] offsets = batch.offsets;
	int max = batch.offsetsSize;
	// Field count per key: namespace only, or namespace + set name.
	ushort fieldCount = policy.sendSetName ? (ushort)2 : (ushort)1;

	// Calculate size of bin names.
	int binNameSize = 0;
	int operationCount = 0;

	if (binNames != null)
	{
		foreach (string binName in binNames)
		{
			binNameSize += ByteUtil.EstimateSizeUtf8(binName) + OPERATION_HEADER_SIZE;
		}
		operationCount = binNames.Length;
	}

	// Estimate buffer size.
	Begin();
	// Batch index field header + 4-byte key count + 1-byte allowInline flag.
	dataOffset += FIELD_HEADER_SIZE + 5;

	Key prev = null;

	for (int i = 0; i < max; i++)
	{
		Key key = keys[offsets[i]];

		// Per key: 4-byte index + digest.
		dataOffset += key.digest.Length + 4;

		// Try reference equality in hope that namespace for all keys is set from a fixed variable.
		if (prev != null && prev.ns == key.ns && (! policy.sendSetName || prev.setName == key.setName))
		{
			// Can set repeat previous namespace/bin names to save space.
			dataOffset++;
		}
		else
		{
			// Estimate full header, namespace and bin names.
			dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6;

			if (policy.sendSetName)
			{
				dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
			}
			dataOffset += binNameSize;
			prev = key;
		}
	}
	SizeBuffer();
	WriteHeader(policy, readAttr | Command.INFO1_BATCH, 0, 1, 0);

	// Remember where the batch field header starts; its size is back-patched at the end.
	int fieldSizeOffset = dataOffset;
	WriteFieldHeader(0, policy.sendSetName ? FieldType.BATCH_INDEX_WITH_SET : FieldType.BATCH_INDEX); // Need to update size at end

	ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
	dataOffset += 4;
	dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0;

	prev = null;

	for (int i = 0; i < max; i++)
	{
		// Write the key's original index so responses can be matched back to the caller's array.
		int index = offsets[i];
		ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset);
		dataOffset += 4;

		Key key = keys[index];
		byte[] digest = key.digest;
		Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
		dataOffset += digest.Length;

		// Try reference equality in hope that namespace for all keys is set from a fixed variable.
		if (prev != null && prev.ns == key.ns && (!policy.sendSetName || prev.setName == key.setName))
		{
			// Can set repeat previous namespace/bin names to save space.
			dataBuffer[dataOffset++] = 1; // repeat
		}
		else
		{
			// Write full header, namespace and bin names.
			dataBuffer[dataOffset++] = 0; // do not repeat
			dataBuffer[dataOffset++] = (byte)readAttr;
			dataOffset += ByteUtil.ShortToBytes(fieldCount, dataBuffer, dataOffset);
			dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
			WriteField(key.ns, FieldType.NAMESPACE);

			if (policy.sendSetName)
			{
				WriteField(key.setName, FieldType.TABLE);
			}

			if (binNames != null)
			{
				foreach (string binName in binNames)
				{
					WriteOperation(binName, Operation.Type.READ);
				}
			}
			prev = key;
		}
	}

	// Write real field size.
	ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
	End();
}