Example #1
        public Node GetSequenceNode(Cluster cluster, Partition partition)
        {
            // Must copy hashmap reference for copy on write semantics to work.
            Dictionary <string, Node[][]> map = cluster.partitionMap;

            Node[][] replicaArray;

            if (map.TryGetValue(partition.ns, out replicaArray))
            {
                for (int i = 0; i < replicaArray.Length; i++)
                {
                    int index = Math.Abs(sequence % replicaArray.Length);
                    sequence++;
                    Node node = replicaArray[index][partition.partitionId];

                    if (node != null && node.Active)
                    {
                        return(node);
                    }
                }
            }

            /*
             * if (Log.debugEnabled()) {
             *      Log.debug("Choose random node for " + partition);
             * }
             */
            return(cluster.GetRandomNode());
        }
Example #2
        public Node GetSequenceNode(Cluster cluster, Partition partition)
        {
            // Must copy hashmap reference for copy on write semantics to work.
            Dictionary <string, Partitions> map = cluster.partitionMap;
            Partitions partitions;

            if (!map.TryGetValue(partition.ns, out partitions))
            {
                throw new AerospikeException("Invalid namespace: " + partition.ns);
            }

            Node[][] replicas = partitions.replicas;

            for (int i = 0; i < replicas.Length; i++)
            {
                uint index = sequence % (uint)replicas.Length;
                Node node  = replicas[index][partition.partitionId];

                if (node != null && node.Active)
                {
                    return(node);
                }
                sequence++;
            }

            if (partitions.cpMode)
            {
                throw new AerospikeException.InvalidNode();
            }
            return(cluster.GetRandomNode());
        }
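
Several of these examples start with the comment "Must copy hashmap reference for copy on write semantics to work." The sketch below uses illustrative names (PartitionTable, Update, Lookup) rather than the actual Aerospike client types; it only shows the pattern that comment relies on: writers publish a brand-new dictionary instead of mutating the existing one, so a reader that captures the reference once works against a single consistent snapshot without locking.

    // Minimal sketch of the copy-on-write pattern; names are illustrative, not Aerospike APIs.
    using System.Collections.Generic;

    public class PartitionTable
    {
        // Writers never mutate the published dictionary; they build a copy and swap the reference.
        private volatile Dictionary<string, string[]> map = new Dictionary<string, string[]>();

        public void Update(string ns, string[] nodeNames)
        {
            var copy = new Dictionary<string, string[]>(map);
            copy[ns] = nodeNames;
            map = copy; // publish the new snapshot with a single reference assignment
        }

        public string[] Lookup(string ns)
        {
            // Copy the reference once so the whole lookup sees one snapshot,
            // even if Update() publishes a newer map while this runs.
            Dictionary<string, string[]> snapshot = map;
            string[] nodeNames;
            return snapshot.TryGetValue(ns, out nodeNames) ? nodeNames : null;
        }
    }
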
Example #3
        private void ExecuteCommand(Cluster cluster, AdminPolicy policy)
        {
            WriteSize();
            Node       node    = cluster.GetRandomNode();
            int        timeout = (policy == null) ? 1000 : policy.timeout;
            Connection conn    = node.GetConnection(timeout);

            try
            {
                conn.Write(dataBuffer, dataOffset);
                conn.ReadFully(dataBuffer, HEADER_SIZE);
                node.PutConnection(conn);
            }
            catch (Exception)
            {
                // Garbage may be in socket.  Do not put back into pool.
                node.CloseConnection(conn);
                throw;
            }

            int result = dataBuffer[RESULT_CODE];

            if (result != 0)
            {
                throw new AerospikeException(result);
            }
        }
Example #4
        private void ExecuteQuery(Cluster cluster, AdminPolicy policy)
        {
            WriteSize();
            Node       node    = cluster.GetRandomNode();
            int        timeout = (policy == null) ? 1000 : policy.timeout;
            int        status  = 0;
            Connection conn    = node.GetConnection(timeout);

            try
            {
                conn.Write(dataBuffer, dataOffset);
                status = ReadBlocks(conn);
                node.PutConnection(conn);
            }
            catch (Exception e)
            {
                // Garbage may be in socket.  Do not put back into pool.
                node.CloseConnection(conn);
                throw new AerospikeException(e);
            }

            if (status != QUERY_END && status > 0)
            {
                throw new AerospikeException(status, "Query failed.");
            }
        }
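
Both admin helpers above follow the same discipline around the connection pool: write the request, read the response, and only then return the connection; on any exception the connection is closed, since unread bytes may still be sitting in the socket. A minimal sketch of that pattern, using made-up pool types (IConnectionPool, PooledConnection) rather than the client's Node and Connection classes:

    // Illustrative return-or-close discipline for pooled connections (not Aerospike types).
    using System;

    public interface IConnectionPool
    {
        PooledConnection Get(int timeoutMillis);
        void Put(PooledConnection conn);   // return a healthy connection to the pool
        void Close(PooledConnection conn); // discard a possibly-corrupted connection
    }

    public abstract class PooledConnection
    {
        public abstract void Write(byte[] buffer, int length);
        public abstract void ReadFully(byte[] buffer, int length);
    }

    public static class AdminCall
    {
        public static void Send(IConnectionPool pool, byte[] request, byte[] response, int timeoutMillis)
        {
            PooledConnection conn = pool.Get(timeoutMillis);
            try
            {
                conn.Write(request, request.Length);
                conn.ReadFully(response, response.Length);
                pool.Put(conn);   // success: socket is in a known state, reuse it
            }
            catch (Exception)
            {
                pool.Close(conn); // failure: unread garbage may remain, never reuse it
                throw;
            }
        }
    }
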
Example #5
        public Node GetNode(Cluster cluster, Policy policy, Partition partition, bool isRead)
        {
            // Must copy hashmap reference for copy on write semantics to work.
            Dictionary <string, Partitions> map = cluster.partitionMap;
            Partitions partitions;

            if (!map.TryGetValue(partition.ns, out partitions))
            {
                throw new AerospikeException.InvalidNamespace(partition.ns, map.Count);
            }

            if (partitions.cpMode && isRead && !policy.linearizeRead)
            {
                // Strong Consistency namespaces always use master node when read policy is sequential.
                return(cluster.GetMasterNode(partitions, partition));
            }

            // Handle default case first.
            if (policy.replica == Replica.SEQUENCE)
            {
                // Sequence always starts at master, so writes can go through the same algorithm.
                return(GetSequenceNode(cluster, partitions, partition));
            }

            if (!isRead)
            {
                // Writes will always proxy to master node.
                return(cluster.GetMasterNode(partitions, partition));
            }

            switch (policy.replica)
            {
            default:
            case Replica.MASTER:
                return(cluster.GetMasterNode(partitions, partition));

            case Replica.PREFER_RACK:
                return(GetRackNode(cluster, partitions, partition));

            case Replica.MASTER_PROLES:
                return(cluster.GetMasterProlesNode(partitions, partition));

            case Replica.RANDOM:
                return(cluster.GetRandomNode());
            }
        }
Example #6
        public Node GetReadNode(Cluster cluster, Partition partition, Replica replica)
        {
            switch (replica)
            {
            case Replica.MASTER:
                return(cluster.GetMasterNode(partition));

            case Replica.MASTER_PROLES:
                return(cluster.GetMasterProlesNode(partition));

            case Replica.SEQUENCE:
                return(GetSequenceNode(cluster, partition));

            default:
            case Replica.RANDOM:
                return(cluster.GetRandomNode());
            }
        }
Example #7
        public Node GetNode(Cluster cluster, Partition partition, Replica replica, bool isRead)
        {
            // Handle default case first.
            if (replica == Replica.SEQUENCE)
            {
                return(GetSequenceNode(cluster, partition));
            }

            if (replica == Replica.MASTER || !isRead)
            {
                return(cluster.GetMasterNode(partition));
            }

            if (replica == Replica.MASTER_PROLES)
            {
                return(cluster.GetMasterProlesNode(partition));
            }
            return(cluster.GetRandomNode());
        }
Example #8
        public Node GetSequenceNode(Cluster cluster, Partition partition)
        {
            // Must copy hashmap reference for copy on write semantics to work.
            Dictionary <string, Partitions> map = cluster.partitionMap;
            Partitions partitions;

            if (!map.TryGetValue(partition.ns, out partitions))
            {
                // Add these lines
                StringBuilder sb = new StringBuilder(1000);
                sb.Append("Invalid namespace: ");
                sb.Append(partition.ns);
                sb.Append(" current namespaces: ");

                foreach (string key in map.Keys)
                {
                    sb.Append(key);
                    sb.Append(' ');
                }
                throw new AerospikeException(sb.ToString());
            }

            Node[][] replicas = partitions.replicas;

            for (int i = 0; i < replicas.Length; i++)
            {
                uint index = sequence % (uint)replicas.Length;
                Node node  = replicas[index][partition.partitionId];

                if (node != null && node.Active)
                {
                    return(node);
                }
                sequence++;
            }

            if (partitions.cpMode)
            {
                throw new AerospikeException.InvalidNode();
            }
            return(cluster.GetRandomNode());
        }
Example #9
        public Node GetNodeRead(Cluster cluster)
        {
            switch (replica)
            {
            default:
            case Replica.SEQUENCE:
                return(GetSequenceNode(cluster));

            case Replica.PREFER_RACK:
                return(GetRackNode(cluster));

            case Replica.MASTER:
                return(GetMasterNode(cluster));

            case Replica.MASTER_PROLES:
                return(GetMasterProlesNode(cluster));

            case Replica.RANDOM:
                return(cluster.GetRandomNode());
            }
        }
Example #10
        private static Node GetNode(Cluster cluster, Policy policy, Partition partition, uint sequence)
        {
            // Must copy hashmap reference for copy on write semantics to work.
            Dictionary <string, Partitions> map = cluster.partitionMap;
            Partitions partitions;

            if (!map.TryGetValue(partition.ns, out partitions))
            {
                throw new AerospikeException.InvalidNamespace(partition.ns, map.Count);
            }

            if (partitions.cpMode && !policy.linearizeRead)
            {
                // Strong Consistency namespaces always use master node when read policy is sequential.
                return(cluster.GetMasterNode(partitions, partition));
            }

            switch (policy.replica)
            {
            case Replica.SEQUENCE:
                return(GetSequenceNode(cluster, partitions, partition, sequence));

            case Replica.PREFER_RACK:
                return(GetRackNode(cluster, partitions, partition, sequence));

            default:
            case Replica.MASTER:
                return(cluster.GetMasterNode(partitions, partition));

            case Replica.MASTER_PROLES:
                return(cluster.GetMasterProlesNode(partitions, partition));

            case Replica.RANDOM:
                return(cluster.GetRandomNode());
            }
        }
Example #11
        private void ExecuteQuery(Cluster cluster, AdminPolicy policy)
        {
            WriteSize();
            Node node = cluster.GetRandomNode();
            int timeout = (policy == null) ? 1000 : policy.timeout;
            int status = 0;
            Connection conn = node.GetConnection(timeout);

            try
            {
                conn.Write(dataBuffer, dataOffset);
                status = ReadBlocks(conn);
                node.PutConnection(conn);
            }
            catch (Exception e)
            {
                // Garbage may be in socket.  Do not put back into pool.
                node.CloseConnection(conn);
                throw new AerospikeException(e);
            }

            if (status != QUERY_END && status > 0)
            {
                throw new AerospikeException(status, "Query failed.");
            }
        }
Example #12
        public static void Execute
        (
            Cluster cluster,
            BatchPolicy policy,
            Key[] keys,
            bool[] existsArray,
            Record[] records,
            string[] binNames,
            int readAttr
        )
        {
            if (keys.Length == 0)
            {
                return;
            }

            if (policy.allowProleReads)
            {
                // Send all requests to a single node chosen in round-robin fashion in this transaction thread.
                Node      node      = cluster.GetRandomNode();
                BatchNode batchNode = new BatchNode(node, keys);
                ExecuteNode(cluster, batchNode, policy, keys, existsArray, records, binNames, readAttr);
                return;
            }

            List <BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys);

            if (policy.maxConcurrentThreads == 1 || batchNodes.Count <= 1)
            {
                // Run batch requests sequentially in same thread.
                foreach (BatchNode batchNode in batchNodes)
                {
                    ExecuteNode(cluster, batchNode, policy, keys, existsArray, records, binNames, readAttr);
                }
            }
            else
            {
                // Run batch requests in parallel in separate threads.
                //
                // Multiple threads write to the record/exists array, so one might think that
                // volatile or memory barriers are needed on the write threads and this read thread.
                // This should not be necessary here because it happens in Executor which does a
                // volatile write (Interlocked.Increment(ref completedCount)) at the end of write threads
                // and a synchronized WaitTillComplete() in this thread.
                Executor executor = new Executor(batchNodes.Count * 2);

                // Initialize threads.
                foreach (BatchNode batchNode in batchNodes)
                {
                    if (records != null)
                    {
                        MultiCommand command = new BatchGetArrayCommand(cluster, executor, batchNode, policy, keys, binNames, records, readAttr);
                        executor.AddCommand(command);
                    }
                    else
                    {
                        MultiCommand command = new BatchExistsArrayCommand(cluster, executor, batchNode, policy, keys, existsArray);
                        executor.AddCommand(command);
                    }
                }
                executor.Execute(policy.maxConcurrentThreads);
            }
        }
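
The comment in the parallel branch above argues that plain writes to the record/exists arrays are safe because each worker thread finishes with a volatile write (Interlocked.Increment on a completion counter) and the submitting thread performs a synchronized wait before reading the results. The standalone sketch below is not the Aerospike Executor, just an illustration of that ordering: the Interlocked.Increment acts as a release, and the monitor acquisition plus Volatile.Read in the waiting thread acts as the matching acquire, so the earlier array writes are visible afterwards.

    // Standalone sketch (illustrative, not the Aerospike Executor) of the visibility argument.
    using System.Threading;

    public class CompletionWait
    {
        private readonly object monitor = new object();
        private int completedCount;

        public int[] Run(int workers)
        {
            int[] results = new int[workers];

            for (int i = 0; i < workers; i++)
            {
                int slot = i;
                new Thread(() =>
                {
                    results[slot] = slot * slot;                    // plain, non-volatile write
                    if (Interlocked.Increment(ref completedCount) == workers)
                    {
                        lock (monitor) { Monitor.Pulse(monitor); }  // wake the waiting thread
                    }
                }).Start();
            }

            lock (monitor)
            {
                // Re-check under the lock so a Pulse issued before Wait() cannot be missed.
                while (Volatile.Read(ref completedCount) < workers)
                {
                    Monitor.Wait(monitor);
                }
            }
            // Every write made before a worker's Interlocked.Increment is visible here.
            return results;
        }
    }
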
Example #13
        public static void Execute(Cluster cluster, BatchPolicy policy, Key[] keys, bool[] existsArray, Record[] records, HashSet <string> binNames, int readAttr)
        {
            if (keys.Length == 0)
            {
                return;
            }

            if (policy.allowProleReads)
            {
                // Send all requests to a single node chosen in round-robin fashion in this transaction thread.
                Node node = cluster.GetRandomNode();

                if (records != null)
                {
                    BatchCommandNodeGet command = new BatchCommandNodeGet(node, policy, keys, records, binNames, readAttr);
                    command.Execute();
                }
                else
                {
                    BatchCommandNodeExists command = new BatchCommandNodeExists(node, policy, keys, existsArray);
                    command.Execute();
                }
                return;
            }

            List <BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys);

            if (policy.maxConcurrentThreads == 1)
            {
                // Run batch requests sequentially in same thread.
                foreach (BatchNode batchNode in batchNodes)
                {
                    foreach (BatchNode.BatchNamespace batchNamespace in batchNode.batchNamespaces)
                    {
                        if (records != null)
                        {
                            BatchCommandGet command = new BatchCommandGet(batchNode.node, batchNamespace, policy, keys, binNames, records, readAttr);
                            command.Execute();
                        }
                        else
                        {
                            BatchCommandExists command = new BatchCommandExists(batchNode.node, batchNamespace, policy, keys, existsArray);
                            command.Execute();
                        }
                    }
                }
            }
            else
            {
                // Run batch requests in parallel in separate threads.
                Executor executor = new Executor(batchNodes.Count * 2);

                // Initialize threads.  There may be multiple threads for a single node because the
                // wire protocol only allows one namespace per command.  Multiple namespaces
                // require multiple threads per node.
                foreach (BatchNode batchNode in batchNodes)
                {
                    foreach (BatchNode.BatchNamespace batchNamespace in batchNode.batchNamespaces)
                    {
                        if (records != null)
                        {
                            MultiCommand command = new BatchCommandGet(batchNode.node, batchNamespace, policy, keys, binNames, records, readAttr);
                            executor.AddCommand(command);
                        }
                        else
                        {
                            MultiCommand command = new BatchCommandExists(batchNode.node, batchNamespace, policy, keys, existsArray);
                            executor.AddCommand(command);
                        }
                    }
                }

                executor.Execute(policy.maxConcurrentThreads);
            }
        }
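
The example above issues one command per (node, namespace) pair because, as its thread-initialization comment notes, this batch wire protocol only allows one namespace per command. A rough sketch of that grouping step, using simplified stand-in types instead of the client's Key and BatchNamespace classes:

    // Illustrative grouping of batch keys by namespace (simplified stand-in types).
    using System.Collections.Generic;

    public static class NamespaceSplit
    {
        // Returns, for each namespace, the offsets of the keys that belong to it,
        // so each group can become its own command (and, in parallel mode, its own thread).
        public static Dictionary<string, List<int>> SplitByNamespace(string[] keyNamespaces)
        {
            var groups = new Dictionary<string, List<int>>();

            for (int offset = 0; offset < keyNamespaces.Length; offset++)
            {
                List<int> offsets;

                if (!groups.TryGetValue(keyNamespaces[offset], out offsets))
                {
                    offsets = new List<int>();
                    groups[keyNamespaces[offset]] = offsets;
                }
                offsets.Add(offset); // remember the key's position in the original batch
            }
            return groups;
        }
    }
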
Example #14
        public static void Execute(Cluster cluster, BatchPolicy policy, Key[] keys, bool[] existsArray, Record[] records, string[] binNames, int readAttr)
        {
            if (keys.Length == 0)
            {
                return;
            }

            if (policy.allowProleReads)
            {
                // Send all requests to a single node chosen in round-robin fashion in this transaction thread.
                Node node = cluster.GetRandomNode();
                BatchNode batchNode = new BatchNode(node, keys);
                ExecuteNode(batchNode, policy, keys, existsArray, records, binNames, readAttr);
                return;
            }

            List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys);

            if (policy.maxConcurrentThreads == 1 || batchNodes.Count <= 1)
            {
                // Run batch requests sequentially in same thread.
                foreach (BatchNode batchNode in batchNodes)
                {
                    ExecuteNode(batchNode, policy, keys, existsArray, records, binNames, readAttr);
                }
            }
            else
            {
                // Run batch requests in parallel in separate threads.
                //
                // Multiple threads write to the record/exists array, so one might think that
                // volatile or memory barriers are needed on the write threads and this read thread.
                // This should not be necessary here because it happens in Executor which does a
                // volatile write (Interlocked.Increment(ref completedCount)) at the end of write threads
                // and a synchronized WaitTillComplete() in this thread.
                Executor executor = new Executor(batchNodes.Count * 2);

                // Initialize threads.
                foreach (BatchNode batchNode in batchNodes)
                {
                    if (batchNode.node.UseNewBatch(policy))
                    {
                        // New batch
                        if (records != null)
                        {
                            MultiCommand command = new BatchGetArrayCommand(batchNode, policy, keys, binNames, records, readAttr);
                            executor.AddCommand(command);
                        }
                        else
                        {
                            MultiCommand command = new BatchExistsArrayCommand(batchNode, policy, keys, existsArray);
                            executor.AddCommand(command);
                        }
                    }
                    else
                    {
                        // There may be multiple threads for a single node because the
                        // wire protocol only allows one namespace per command.  Multiple namespaces
                        // require multiple threads per node.
                        batchNode.SplitByNamespace(keys);

                        foreach (BatchNode.BatchNamespace batchNamespace in batchNode.batchNamespaces)
                        {
                            if (records != null)
                            {
                                MultiCommand command = new BatchGetArrayDirect(batchNode.node, batchNamespace, policy, keys, binNames, records, readAttr);
                                executor.AddCommand(command);
                            }
                            else
                            {
                                MultiCommand command = new BatchExistsArrayDirect(batchNode.node, batchNamespace, policy, keys, existsArray);
                                executor.AddCommand(command);
                            }
                        }
                    }
                }
                executor.Execute(policy.maxConcurrentThreads);
            }
        }
Example #15
        private void ExecuteCommand(Cluster cluster, AdminPolicy policy)
        {
            WriteSize();
            Node node = cluster.GetRandomNode();
            int timeout = (policy == null) ? 1000 : policy.timeout;
            Connection conn = node.GetConnection(timeout);

            try
            {
                conn.Write(dataBuffer, dataOffset);
                conn.ReadFully(dataBuffer, HEADER_SIZE);
                node.PutConnection(conn);
            }
            catch (Exception)
            {
                // Garbage may be in socket.  Do not put back into pool.
                node.CloseConnection(conn);
                throw;
            }

            int result = dataBuffer[RESULT_CODE];

            if (result != 0)
            {
                throw new AerospikeException(result);
            }
        }
Example #16
        public static RegisterTask Register(Cluster cluster, Policy policy, string content, string serverPath, Language language)
        {
            StringBuilder sb = new StringBuilder(serverPath.Length + content.Length + 100);
            sb.Append("udf-put:filename=");
            sb.Append(serverPath);
            sb.Append(";content=");
            sb.Append(content);
            sb.Append(";content-len=");
            sb.Append(content.Length);
            sb.Append(";udf-type=");
            sb.Append(language);
            sb.Append(";");

            // Send UDF to one node. That node will distribute the UDF to other nodes.
            string command = sb.ToString();
            Node node = cluster.GetRandomNode();
            Connection conn = node.GetConnection(policy.timeout);

            try
            {
                Info info = new Info(conn, command);
                Info.NameValueParser parser = info.GetNameValueParser();
                string error = null;
                string file = null;
                string line = null;
                string message = null;

                while (parser.Next())
                {
                    string name = parser.GetName();

                    if (name.Equals("error"))
                    {
                        error = parser.GetValue();
                    }
                    else if (name.Equals("file"))
                    {
                        file = parser.GetValue();
                    }
                    else if (name.Equals("line"))
                    {
                        line = parser.GetValue();
                    }
                    else if (name.Equals("message"))
                    {
                        message = parser.GetStringBase64();
                    }
                }

                if (error != null)
                {
                    throw new AerospikeException("Registration failed: " + error + Environment.NewLine +
                        "File: " + file + Environment.NewLine +
                        "Line: " + line + Environment.NewLine +
                        "Message: " + message
                        );
                }
                node.PutConnection(conn);
                return new RegisterTask(cluster, policy, serverPath);
            }
            catch (Exception)
            {
                conn.Close();
                throw;
            }
        }
Example #17
        public static RegisterTask Register(Cluster cluster, Policy policy, string content, string serverPath, Language language)
        {
            StringBuilder sb = new StringBuilder(serverPath.Length + content.Length + 100);

            sb.Append("udf-put:filename=");
            sb.Append(serverPath);
            sb.Append(";content=");
            sb.Append(content);
            sb.Append(";content-len=");
            sb.Append(content.Length);
            sb.Append(";udf-type=");
            sb.Append(language);
            sb.Append(";");

            // Send UDF to one node. That node will distribute the UDF to other nodes.
            string     command = sb.ToString();
            Node       node    = cluster.GetRandomNode();
            Connection conn    = node.GetConnection(policy.timeout);

            try
            {
                Info info = new Info(conn, command);
                Info.NameValueParser parser = info.GetNameValueParser();
                string error   = null;
                string file    = null;
                string line    = null;
                string message = null;

                while (parser.Next())
                {
                    string name = parser.GetName();

                    if (name.Equals("error"))
                    {
                        error = parser.GetValue();
                    }
                    else if (name.Equals("file"))
                    {
                        file = parser.GetValue();
                    }
                    else if (name.Equals("line"))
                    {
                        line = parser.GetValue();
                    }
                    else if (name.Equals("message"))
                    {
                        message = parser.GetStringBase64();
                    }
                }

                if (error != null)
                {
                    throw new AerospikeException("Registration failed: " + error + Environment.NewLine +
                                                 "File: " + file + Environment.NewLine +
                                                 "Line: " + line + Environment.NewLine +
                                                 "Message: " + message
                                                 );
                }
                node.PutConnection(conn);
                return(new RegisterTask(cluster, policy, serverPath));
            }
            catch (Exception)
            {
                node.CloseConnection(conn);
                throw;
            }
        }
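
Both Register variants above send a single "udf-put:..." info command to one random node and then walk the response with Info.NameValueParser, pulling out the error, file, line and message fields. As a rough illustration of the response shape being parsed (assuming the usual semicolon-delimited name=value info format; this is a stand-in, not the actual Info class):

    // Minimal stand-in parser for a semicolon-delimited "name=value" info response.
    using System.Collections.Generic;

    public static class InfoResponse
    {
        public static Dictionary<string, string> Parse(string response)
        {
            var fields = new Dictionary<string, string>();

            foreach (string pair in response.Split(';'))
            {
                int eq = pair.IndexOf('=');

                if (eq > 0)
                {
                    fields[pair.Substring(0, eq)] = pair.Substring(eq + 1);
                }
            }
            return fields;
        }
    }

    // Hypothetical usage: Parse("error=compile error;file=sum.lua;line=12;")
    // would yield entries for "error", "file" and "line".
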