public static void ScanNodes(Cluster cluster, ScanPolicy policy, string ns, string setName, string[] binNames, ScanCallback callback, Node[] nodes)
        {
            policy.Validate();

            // Detect cluster migrations when performing scan.
            ulong taskId     = RandomShift.ThreadLocalInstance.NextLong();
            ulong clusterKey = policy.failOnClusterChange ? QueryValidate.ValidateBegin(nodes[0], ns) : 0;
            bool  first      = true;

            if (policy.concurrentNodes && nodes.Length > 1)
            {
                Executor executor = new Executor(nodes.Length);

                foreach (Node node in nodes)
                {
                    ScanCommand command = new ScanCommand(cluster, node, policy, ns, setName, binNames, callback, taskId, clusterKey, first);
                    executor.AddCommand(command);
                    first = false;
                }
                executor.Execute(policy.maxConcurrentNodes);
            }
            else
            {
                foreach (Node node in nodes)
                {
                    ScanCommand command = new ScanCommand(cluster, node, policy, ns, setName, binNames, callback, taskId, clusterKey, first);
                    command.Execute();
                    first = false;
                }
            }
        }
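A hedged caller sketch for ScanNodes: the enclosing class is not shown in this snippet, so the ScanExecutor class name below is an assumption, and the namespace/set names are placeholders.

        // Hypothetical caller (assumes the snippet above lives in a ScanExecutor-style helper class).
        public static void ScanWholeCluster(Cluster cluster, ScanPolicy scanPolicy, ScanCallback callback)
        {
            Node[] nodes = cluster.Nodes;   // same accessor used by ScanAll further below

            if (nodes.Length == 0)
            {
                // ScanNodes indexes nodes[0], so an empty cluster must be rejected first.
                throw new AerospikeException(ResultCode.SERVER_NOT_AVAILABLE, "Scan failed because cluster is empty.");
            }

            // binNames == null requests all bins; callbacks fire inside ScanNodes, which blocks until the scan completes.
            ScanExecutor.ScanNodes(cluster, scanPolicy, "test", "demo", null, callback, nodes);
        }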
        public static void ScanPartitions(Cluster cluster, ScanPolicy policy, string ns, string setName, string[] binNames, ScanCallback callback, PartitionTracker tracker)
        {
            policy.Validate();

            while (true)
            {
                ulong taskId = RandomShift.ThreadLocalInstance.NextLong();

                try
                {
                    List<NodePartitions> list = tracker.AssignPartitionsToNodes(cluster, ns);

                    if (policy.concurrentNodes && list.Count > 1)
                    {
                        Executor executor = new Executor(list.Count);

                        foreach (NodePartitions nodePartitions in list)
                        {
                            ScanPartitionCommand command = new ScanPartitionCommand(cluster, policy, ns, setName, binNames, callback, taskId, tracker, nodePartitions);
                            executor.AddCommand(command);
                        }

                        executor.Execute(policy.maxConcurrentNodes);
                    }
                    else
                    {
                        foreach (NodePartitions nodePartitions in list)
                        {
                            ScanPartitionCommand command = new ScanPartitionCommand(cluster, policy, ns, setName, binNames, callback, taskId, tracker, nodePartitions);
                            command.Execute();
                        }
                    }
                }
                catch (AerospikeException ae)
                {
                    ae.Iteration = tracker.iteration;
                    throw;
                }

                if (tracker.IsComplete(policy))
                {
                    // Scan is complete.
                    return;
                }

                if (policy.sleepBetweenRetries > 0)
                {
                    // Sleep before trying again.
                    Util.Sleep(policy.sleepBetweenRetries);
                }
            }
        }
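A hedged caller sketch for the partition-driven scan: the ScanExecutor class name and the PartitionTracker constructor used below (policy plus node array) are assumptions that may differ between client versions; namespace and set names are placeholders.

        // Hypothetical caller for the partition-based path above.
        public static void ScanWithTracker(Cluster cluster, ScanPolicy scanPolicy, ScanCallback callback)
        {
            Node[] nodes = cluster.Nodes;

            // Assumed constructor. The tracker records per-partition progress so the retry loop
            // above only re-reads partitions that did not complete.
            PartitionTracker tracker = new PartitionTracker(scanPolicy, nodes);

            ScanExecutor.ScanPartitions(cluster, scanPolicy, "test", "demo", null, callback, tracker);
        }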
        public static void Execute
        (
            Cluster cluster,
            BatchPolicy policy,
            Key[] keys,
            bool[] existsArray,
            Record[] records,
            string[] binNames,
            int readAttr
        )
        {
            if (keys.Length == 0)
            {
                return;
            }

            if (policy.allowProleReads)
            {
                // Send all requests to a single node chosen in round-robin fashion in this transaction thread.
                Node      node      = cluster.GetRandomNode();
                BatchNode batchNode = new BatchNode(node, keys);
                ExecuteNode(cluster, batchNode, policy, keys, existsArray, records, binNames, readAttr);
                return;
            }

            List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys);

            if (policy.maxConcurrentThreads == 1 || batchNodes.Count <= 1)
            {
                // Run batch requests sequentially in same thread.
                foreach (BatchNode batchNode in batchNodes)
                {
                    ExecuteNode(cluster, batchNode, policy, keys, existsArray, records, binNames, readAttr);
                }
            }
            else
            {
                // Run batch requests in parallel in separate threads.
                //
                // Multiple threads write to the record/exists array, so one might think that
                // volatile writes or memory barriers are needed on both the write threads and this read thread.
                // They are not necessary here because Executor already provides the required synchronization:
                // each write thread ends with a volatile write (Interlocked.Increment(ref completedCount))
                // and this thread performs a synchronized WaitTillComplete().
                Executor executor = new Executor(batchNodes.Count * 2);

                // Initialize threads.
                foreach (BatchNode batchNode in batchNodes)
                {
                    if (records != null)
                    {
                        MultiCommand command = new BatchGetArrayCommand(cluster, executor, batchNode, policy, keys, binNames, records, readAttr);
                        executor.AddCommand(command);
                    }
                    else
                    {
                        MultiCommand command = new BatchExistsArrayCommand(cluster, executor, batchNode, policy, keys, existsArray);
                        executor.AddCommand(command);
                    }
                }
                executor.Execute(policy.maxConcurrentThreads);
            }
        }
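A hedged caller sketch for the batch-read path of this helper: the BatchExecutor class name is an assumption (the enclosing class is not shown), and the read attributes assume the Command.INFO1_* constants are reachable from the caller.

        // Hypothetical caller that fetches every bin for the given keys.
        public static Record[] GetAll(Cluster cluster, BatchPolicy batchPolicy, Key[] keys)
        {
            // Results are written into this pre-sized array by the batch commands.
            Record[] records = new Record[keys.Length];

            // binNames == null plus INFO1_GET_ALL requests every bin; the cast picks the string[] overload.
            BatchExecutor.Execute(cluster, batchPolicy, keys, null, records, (string[])null, Command.INFO1_READ | Command.INFO1_GET_ALL);
            return records;
        }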
        public static void Execute(Cluster cluster, BatchPolicy policy, Key[] keys, bool[] existsArray, Record[] records, HashSet<string> binNames, int readAttr)
        {
            if (keys.Length == 0)
            {
                return;
            }

            if (policy.allowProleReads)
            {
                // Send all requests to a single node chosen in round-robin fashion in this transaction thread.
                Node node = cluster.GetRandomNode();

                if (records != null)
                {
                    BatchCommandNodeGet command = new BatchCommandNodeGet(node, policy, keys, records, binNames, readAttr);
                    command.Execute();
                }
                else
                {
                    BatchCommandNodeExists command = new BatchCommandNodeExists(node, policy, keys, existsArray);
                    command.Execute();
                }
                return;
            }

            List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys);

            if (policy.maxConcurrentThreads == 1)
            {
                // Run batch requests sequentially in same thread.
                foreach (BatchNode batchNode in batchNodes)
                {
                    foreach (BatchNode.BatchNamespace batchNamespace in batchNode.batchNamespaces)
                    {
                        if (records != null)
                        {
                            BatchCommandGet command = new BatchCommandGet(batchNode.node, batchNamespace, policy, keys, binNames, records, readAttr);
                            command.Execute();
                        }
                        else
                        {
                            BatchCommandExists command = new BatchCommandExists(batchNode.node, batchNamespace, policy, keys, existsArray);
                            command.Execute();
                        }
                    }
                }
            }
            else
            {
                // Run batch requests in parallel in separate threads.
                Executor executor = new Executor(batchNodes.Count * 2);

                // Initialize threads.  There may be multiple threads for a single node because the
                // wire protocol only allows one namespace per command.  Multiple namespaces
                // require multiple threads per node.
                foreach (BatchNode batchNode in batchNodes)
                {
                    foreach (BatchNode.BatchNamespace batchNamespace in batchNode.batchNamespaces)
                    {
                        if (records != null)
                        {
                            MultiCommand command = new BatchCommandGet(batchNode.node, batchNamespace, policy, keys, binNames, records, readAttr);
                            executor.AddCommand(command);
                        }
                        else
                        {
                            MultiCommand command = new BatchCommandExists(batchNode.node, batchNamespace, policy, keys, existsArray);
                            executor.AddCommand(command);
                        }
                    }
                }

                executor.Execute(policy.maxConcurrentThreads);
            }
        }
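A hedged caller sketch for the exists-only path of this overload: BatchExecutor is again an assumed class name, the INFO1_* constants are assumed reachable as above, and the cast on binNames disambiguates this HashSet overload from the string[] one.

        // Hypothetical caller that checks key existence without reading bin data.
        public static bool[] ExistsAll(Cluster cluster, BatchPolicy batchPolicy, Key[] keys)
        {
            // Filled in by the batch commands; records == null selects the exists-style commands above.
            bool[] existsArray = new bool[keys.Length];

            BatchExecutor.Execute(cluster, batchPolicy, keys, existsArray, null, (HashSet<string>)null, Command.INFO1_READ | Command.INFO1_NOBINDATA);
            return existsArray;
        }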
        public static void Execute(Cluster cluster, BatchPolicy policy, Key[] keys, bool[] existsArray, Record[] records, string[] binNames, int readAttr)
        {
            if (keys.Length == 0)
            {
                return;
            }

            if (policy.allowProleReads)
            {
                // Send all requests to a single node chosen in round-robin fashion in this transaction thread.
                Node node = cluster.GetRandomNode();
                BatchNode batchNode = new BatchNode(node, keys);
                ExecuteNode(batchNode, policy, keys, existsArray, records, binNames, readAttr);
                return;
            }

            List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys);

            if (policy.maxConcurrentThreads == 1 || batchNodes.Count <= 1)
            {
                // Run batch requests sequentially in same thread.
                foreach (BatchNode batchNode in batchNodes)
                {
                    ExecuteNode(batchNode, policy, keys, existsArray, records, binNames, readAttr);
                }
            }
            else
            {
                // Run batch requests in parallel in separate threads.
                //
                // Multiple threads write to the record/exists array, so one might think that
                // volatile writes or memory barriers are needed on both the write threads and this read thread.
                // They are not necessary here because Executor already provides the required synchronization:
                // each write thread ends with a volatile write (Interlocked.Increment(ref completedCount))
                // and this thread performs a synchronized WaitTillComplete().
                Executor executor = new Executor(batchNodes.Count * 2);

                // Initialize threads.
                foreach (BatchNode batchNode in batchNodes)
                {
                    if (batchNode.node.UseNewBatch(policy))
                    {
                        // New batch
                        if (records != null)
                        {
                            MultiCommand command = new BatchGetArrayCommand(batchNode, policy, keys, binNames, records, readAttr);
                            executor.AddCommand(command);
                        }
                        else
                        {
                            MultiCommand command = new BatchExistsArrayCommand(batchNode, policy, keys, existsArray);
                            executor.AddCommand(command);
                        }
                    }
                    else
                    {
                        // There may be multiple threads for a single node because the
                        // wire protocol only allows one namespace per command.  Multiple namespaces
                        // require multiple threads per node.
                        batchNode.SplitByNamespace(keys);

                        foreach (BatchNode.BatchNamespace batchNamespace in batchNode.batchNamespaces)
                        {
                            if (records != null)
                            {
                                MultiCommand command = new BatchGetArrayDirect(batchNode.node, batchNamespace, policy, keys, binNames, records, readAttr);
                                executor.AddCommand(command);
                            }
                            else
                            {
                                MultiCommand command = new BatchExistsArrayDirect(batchNode.node, batchNamespace, policy, keys, existsArray);
                                executor.AddCommand(command);
                            }
                        }
                    }
                }
                executor.Execute(policy.maxConcurrentThreads);
            }
        }
        //-------------------------------------------------------
        // Batch Read Operations
        //-------------------------------------------------------
        /// <summary>
        /// Read multiple records for specified batch keys in one batch call.
        /// This method allows different namespaces/bins to be requested for each key in the batch.
        /// The returned records are located in the same list.
        /// If the BatchRead key field is not found, the corresponding record field will be null.
        /// The policy can be used to specify timeouts and maximum concurrent threads.
        /// This method requires Aerospike Server version >= 3.6.0.
        /// </summary>
        /// <param name="policy">batch configuration parameters, pass in null for defaults</param>
        /// <param name="records">list of unique record identifiers and the bins to retrieve.
        /// The returned records are located in the same list.</param>
        /// <exception cref="AerospikeException">if read fails</exception>
        public void Get(BatchPolicy policy, List<BatchRead> records)
        {
            if (records.Count == 0)
            {
                return;
            }

            if (policy == null)
            {
                policy = batchPolicyDefault;
            }

            List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records);

            if (policy.maxConcurrentThreads == 1 || batchNodes.Count <= 1)
            {
                // Run batch requests sequentially in same thread.
                foreach (BatchNode batchNode in batchNodes)
                {
                    if (!batchNode.node.hasBatchIndex)
                    {
                        throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Requested command requires a server that supports new batch index protocol.");
                    }
                    MultiCommand command = new BatchReadListCommand(batchNode, policy, records);
                    command.Execute();
                }
            }
            else
            {
                // Run batch requests in parallel in separate threads.
                //
                // Multiple threads write to the record list, so one might think that
                // volatile writes or memory barriers are needed on both the write threads and this read thread.
                // They are not necessary here because Executor already provides the required synchronization:
                // each write thread ends with a volatile write (Interlocked.Increment(ref completedCount))
                // and this thread performs a synchronized WaitTillComplete().
                Executor executor = new Executor(batchNodes.Count);

                foreach (BatchNode batchNode in batchNodes)
                {
                    if (!batchNode.node.hasBatchIndex)
                    {
                        throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Requested command requires a server that supports new batch index protocol.");
                    }
                    MultiCommand command = new BatchReadListCommand(batchNode, policy, records);
                    executor.AddCommand(command);
                }
                executor.Execute(policy.maxConcurrentThreads);
            }
        }
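A minimal usage sketch for this batch-read API from application code, assuming the methods above belong to AerospikeClient (consistent with the batchPolicyDefault and cluster fields they reference) and that using System and System.Collections.Generic are in scope; keys, bins, namespace, and set are placeholders.

        public static void ReadMixedBatch(AerospikeClient client)
        {
            List<BatchRead> records = new List<BatchRead>();
            records.Add(new BatchRead(new Key("test", "demo", "key1"), new string[] {"binA"}));   // selected bins
            records.Add(new BatchRead(new Key("test", "demo", "key2"), true));                    // all bins

            // A null policy falls back to batchPolicyDefault, as shown above.
            client.Get(null, records);

            foreach (BatchRead br in records)
            {
                // record is null when the key was not found.
                Console.WriteLine(br.key.userKey + " -> " + (br.record == null ? "not found" : br.record.ToString()));
            }
        }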
        //----------------------------------------------------------
        // Query/Execute UDF (Supported by Aerospike 3 servers only)
        //----------------------------------------------------------
        /// <summary>
        /// Apply user defined function on records that match the statement filter.
        /// Records are not returned to the client.
        /// This asynchronous server call will return before command is complete.  
        /// The user can optionally wait for command completion by using the returned 
        /// ExecuteTask instance.
        /// <para>
        /// This method is only supported by Aerospike 3 servers.
        /// </para>
        /// </summary>
        /// <param name="policy">configuration parameters, pass in null for defaults</param>
        /// <param name="statement">record filter</param>
        /// <param name="packageName">server package where user defined function resides</param>
        /// <param name="functionName">function name</param>
        /// <param name="functionArgs">to pass to function name, if any</param>
        /// <exception cref="AerospikeException">if command fails</exception>
        public ExecuteTask Execute(WritePolicy policy, Statement statement, string packageName, string functionName, params Value[] functionArgs)
        {
            if (policy == null)
            {
                policy = writePolicyDefault;
            }

            statement.SetAggregateFunction(packageName, functionName, functionArgs);
            statement.Prepare(false);

            Node[] nodes = cluster.Nodes;
            if (nodes.Length == 0)
            {
                throw new AerospikeException(ResultCode.SERVER_NOT_AVAILABLE, "Command failed because cluster is empty.");
            }

            Executor executor = new Executor(nodes.Length);

            foreach (Node node in nodes)
            {
                ServerCommand command = new ServerCommand(node, policy, statement);
                executor.AddCommand(command);
            }

            executor.Execute(nodes.Length);
            return new ExecuteTask(cluster, policy, statement);
        }
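A minimal usage sketch for the background UDF call, assuming a connected AerospikeClient and that a Lua package named example_udf exposing a function named process has already been registered on the server (both names are placeholders).

        public static void RunBackgroundUdf(AerospikeClient client)
        {
            Statement stmt = new Statement();
            stmt.SetNamespace("test");
            stmt.SetSetName("demo");
            // With no filter set, the UDF is applied to every record in the set.

            // Returns before the job finishes; a null policy falls back to writePolicyDefault.
            ExecuteTask task = client.Execute(null, stmt, "example_udf", "process", Value.Get(1));

            // Optionally block until the server reports the background job complete.
            task.Wait();
        }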
        //-------------------------------------------------------
        // Scan Operations
        //-------------------------------------------------------
        /// <summary>
        /// Read all records in specified namespace and set.  If the policy's 
        /// concurrentNodes is specified, each server node will be read in
        /// parallel.  Otherwise, server nodes are read in series.
        /// <para>
        /// This call will block until the scan is complete - callbacks are made
        /// within the scope of this call.
        /// </para>
        /// </summary>
        /// <param name="policy">scan configuration parameters, pass in null for defaults</param>
        /// <param name="ns">namespace - equivalent to database name</param>
        /// <param name="setName">optional set name - equivalent to database table</param>
        /// <param name="callback">read callback method - called with record data</param>
        /// <param name="binNames">
        /// optional bins to retrieve. All bins will be returned if not specified.
        /// Aerospike 2 servers ignore this parameter.
        /// </param>
        /// <exception cref="AerospikeException">if scan fails</exception>
        public void ScanAll(ScanPolicy policy, string ns, string setName, ScanCallback callback, params string[] binNames)
        {
            if (policy == null)
            {
                policy = scanPolicyDefault;
            }

            Node[] nodes = cluster.Nodes;

            if (nodes.Length == 0)
            {
                throw new AerospikeException(ResultCode.SERVER_NOT_AVAILABLE, "Scan failed because cluster is empty.");
            }

            if (policy.concurrentNodes)
            {
                Executor executor = new Executor(nodes.Length);
                ulong taskId = RandomShift.ThreadLocalInstance.NextLong();

                foreach (Node node in nodes)
                {
                    ScanCommand command = new ScanCommand(node, policy, ns, setName, callback, binNames, taskId);
                    executor.AddCommand(command);
                }

                executor.Execute(policy.maxConcurrentNodes);
            }
            else
            {
                foreach (Node node in nodes)
                {
                    ScanNode(policy, node, ns, setName, callback, binNames);
                }
            }
        }
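A minimal usage sketch for ScanAll from application code, assuming a reachable server at a placeholder address; the lambda converts to the ScanCallback delegate, and the count uses System.Threading.Interlocked because callbacks may run on multiple threads when concurrentNodes is enabled.

        public static void CountRecords()
        {
            using (AerospikeClient client = new AerospikeClient("127.0.0.1", 3000))
            {
                ScanPolicy policy = new ScanPolicy();
                policy.concurrentNodes = true;   // read server nodes in parallel, as described above

                int count = 0;

                // Blocks until the scan completes; callbacks run inside this call.
                client.ScanAll(policy, "test", "demo", (key, record) => System.Threading.Interlocked.Increment(ref count));

                Console.WriteLine("Records scanned: " + count);
            }
        }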