public Node GetNode(Cluster cluster, Policy policy, Partition partition, bool isRead)
{
	// Copy the partition map reference once so copy-on-write semantics hold
	// for the duration of this lookup.
	Dictionary<string, Partitions> map = cluster.partitionMap;
	Partitions partitions;

	if (!map.TryGetValue(partition.ns, out partitions))
	{
		throw new AerospikeException.InvalidNamespace(partition.ns, map.Count);
	}

	if (partitions.cpMode && isRead && !policy.linearizeRead)
	{
		// Strong consistency namespace: non-linearized reads always go to the master.
		return cluster.GetMasterNode(partitions, partition);
	}

	if (policy.replica == Replica.SEQUENCE)
	{
		// Sequence starts at the master, so reads and writes share the same algorithm.
		return GetSequenceNode(cluster, partitions, partition);
	}

	if (!isRead)
	{
		// All writes are directed to the master node.
		return cluster.GetMasterNode(partitions, partition);
	}

	// Remaining read replica policies.
	if (policy.replica == Replica.PREFER_RACK)
	{
		return GetRackNode(cluster, partitions, partition);
	}

	if (policy.replica == Replica.MASTER_PROLES)
	{
		return cluster.GetMasterProlesNode(partitions, partition);
	}

	if (policy.replica == Replica.RANDOM)
	{
		return cluster.GetRandomNode();
	}

	// Replica.MASTER and any unrecognized value default to the master node,
	// matching the original switch's default branch.
	return cluster.GetMasterNode(partitions, partition);
}
private static Node GetNode(Cluster cluster, Policy policy, Partition partition, uint sequence)
{
	// Copy the partition map reference once so copy-on-write semantics hold.
	Dictionary<string, Partitions> map = cluster.partitionMap;
	Partitions partitions;

	if (!map.TryGetValue(partition.ns, out partitions))
	{
		throw new AerospikeException.InvalidNamespace(partition.ns, map.Count);
	}

	if (partitions.cpMode && !policy.linearizeRead)
	{
		// Strong consistency namespace with a sequential (non-linearized) read
		// policy: always use the master node.
		return cluster.GetMasterNode(partitions, partition);
	}

	// Dispatch on the replica policy; the explicit sequence counter is threaded
	// through to the sequence/rack strategies for retry iteration.
	switch (policy.replica)
	{
		case Replica.SEQUENCE:
			return GetSequenceNode(cluster, partitions, partition, sequence);

		case Replica.PREFER_RACK:
			return GetRackNode(cluster, partitions, partition, sequence);

		case Replica.MASTER_PROLES:
			return cluster.GetMasterProlesNode(partitions, partition);

		case Replica.RANDOM:
			return cluster.GetRandomNode();

		case Replica.MASTER:
		default:
			return cluster.GetMasterNode(partitions, partition);
	}
}
public Node GetReadNode(Cluster cluster, Partition partition, Replica replica)
{
	// Map the read replica policy onto a node-selection strategy.
	if (replica == Replica.MASTER)
	{
		return cluster.GetMasterNode(partition);
	}

	if (replica == Replica.MASTER_PROLES)
	{
		return cluster.GetMasterProlesNode(partition);
	}

	if (replica == Replica.SEQUENCE)
	{
		return GetSequenceNode(cluster, partition);
	}

	// Replica.RANDOM and any unrecognized value fall back to a random node,
	// matching the original switch's default branch.
	return cluster.GetRandomNode();
}
public Node GetNode(Cluster cluster, Partition partition, Replica replica, bool isRead)
{
	// Sequence mode handles reads and writes alike, starting at the master.
	if (replica == Replica.SEQUENCE)
	{
		return GetSequenceNode(cluster, partition);
	}

	// Writes always go to the master, as do explicit master reads.
	if (!isRead || replica == Replica.MASTER)
	{
		return cluster.GetMasterNode(partition);
	}

	// Remaining read policies: master+proles, otherwise random.
	return replica == Replica.MASTER_PROLES
		? cluster.GetMasterProlesNode(partition)
		: cluster.GetRandomNode();
}
public static List<BatchNode> GenerateList(Cluster cluster, BatchPolicy policy, List<BatchRead> records)
{
	Node[] nodes = cluster.Nodes;

	if (nodes.Length == 0)
	{
		throw new AerospikeException(ResultCode.SERVER_NOT_AVAILABLE, "Command failed because cluster is empty.");
	}

	// Estimate each node's initial key capacity as the per-node average plus
	// 25% (unsigned shift keeps the adjustment non-negative), floored at 10.
	int count = records.Count;
	int capacity = count / nodes.Length;
	capacity += (int)((uint)capacity >> 2);

	if (capacity < 10)
	{
		capacity = 10;
	}

	// Group record indexes by the master node that owns each key's partition.
	List<BatchNode> batchNodes = new List<BatchNode>(nodes.Length);

	for (int i = 0; i < count; i++)
	{
		Node node = cluster.GetMasterNode(new Partition(records[i].key));
		BatchNode batchNode = FindBatchNode(batchNodes, node);

		if (batchNode == null)
		{
			batchNodes.Add(new BatchNode(node, capacity, i));
		}
		else
		{
			batchNode.AddKey(i);
		}
	}
	return batchNodes;
}