/// <summary>
/// Generic header write for read-style commands.
/// Fills in the attribute bytes, timeout, field count and operation count.
/// The leading total-size field is NOT written here; per the comment below it
/// must be written last (after the body length is known).
/// </summary>
private void WriteHeader(Policy policy, int readAttr, int writeAttr, int fieldCount, int operationCount)
{
    int infoAttr = 0;

    if (policy.linearizeRead)
    {
        infoAttr |= Command.INFO3_LINEARIZE_READ;
    }

    if (policy.consistencyLevel == ConsistencyLevel.CONSISTENCY_ALL)
    {
        readAttr |= Command.INFO1_CONSISTENCY_ALL;
    }

    // Reserve 8 bytes at the front of the message for the size field written later.
    dataOffset += 8;

    // Write all header data except total size which must be written last.
    dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length.
    dataBuffer[dataOffset++] = (byte)readAttr;
    dataBuffer[dataOffset++] = (byte)writeAttr;
    dataBuffer[dataOffset++] = (byte)infoAttr;

    // Zero the next 10 header bytes — unused for this command type.
    // (The write-header variant uses this region for unused/result-code/
    // generation/expiration — see the fixed-offset WriteHeader overload.)
    for (int i = 0; i < 10; i++)
    {
        dataBuffer[dataOffset++] = 0;
    }
    dataOffset += ByteUtil.IntToBytes((uint)policy.totalTimeout, dataBuffer, dataOffset);
    dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
    dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
}
/// <summary>
/// Write an operation that carries neither a bin name nor a value.
/// The 4-byte size field is always 4 (type byte plus three zero bytes).
/// </summary>
private void WriteOperation(Operation.Type operationType)
{
    dataOffset += ByteUtil.IntToBytes(4, dataBuffer, dataOffset);
    dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType);

    // Remaining three header bytes are zero: no value type, no version, no name.
    for (int i = 0; i < 3; i++)
    {
        dataBuffer[dataOffset++] = 0;
    }
}
/// <summary>
/// Append a one-byte type marker followed by a 4-byte unsigned value,
/// growing the buffer first if there is not enough room.
/// </summary>
public void PackInt(int type, uint val)
{
    // One type byte plus four value bytes.
    const int required = 5;

    if (offset + required > buffer.Length)
    {
        Resize(required);
    }
    buffer[offset++] = (byte)type;
    offset += ByteUtil.IntToBytes(val, buffer, offset);
}
/// <summary>
/// Serialize this node: a 2-byte op code followed by a 4-byte zero length
/// (this node carries no payload). Returns the offset past the written bytes.
/// </summary>
public override int Write(byte[] buf, int offset)
{
    offset += ByteUtil.ShortToBytes(op, buf, offset);
    offset += ByteUtil.IntToBytes(0, buf, offset);
    return offset;
}
/// <summary>
/// Write a bin-name-only operation (no value). The UTF-8 name is encoded
/// past the operation header first so its length is known, then the header
/// is back-filled.
/// </summary>
private void WriteOperation(string name, Operation.Type operationType)
{
    int nameLength = ByteUtil.StringToUtf8(name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE);

    // Size field covers the 4 header bytes after it plus the name.
    dataOffset += ByteUtil.IntToBytes((uint)(nameLength + 4), dataBuffer, dataOffset);
    dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType);
    dataBuffer[dataOffset++] = 0; // no bin value present
    dataBuffer[dataOffset++] = 0;
    dataBuffer[dataOffset++] = (byte)nameLength;
    dataOffset += nameLength;
}
/// <summary>
/// Serialize this node: 2-byte type, 4-byte UTF-8 byte length, then the
/// encoded string. The string is written first (past the length slot) so
/// the length can be back-filled. Returns the offset past the written bytes.
/// </summary>
public override int Write(byte[] buf, int offset)
{
    offset += ByteUtil.ShortToBytes(type, buf, offset);

    int len = ByteUtil.StringToUtf8(value, buf, offset + 4);
    ByteUtil.IntToBytes((uint)len, buf, offset);
    offset += 4 + len;
    return offset;
}
/// <summary>
/// Write a full operation: bin name and bin value are encoded past the
/// operation header first so their lengths are known, then the header is
/// back-filled.
/// </summary>
private void WriteOperation(Operation operation)
{
    int nameLength = ByteUtil.StringToUtf8(operation.binName, dataBuffer, dataOffset + OPERATION_HEADER_SIZE);
    int valueLength = operation.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength);

    // Size field covers the 4 header bytes after it plus name and value.
    dataOffset += ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset);
    dataBuffer[dataOffset++] = Operation.GetProtocolType(operation.type);
    dataBuffer[dataOffset++] = (byte)operation.value.Type;
    dataBuffer[dataOffset++] = 0;
    dataBuffer[dataOffset++] = (byte)nameLength;
    dataOffset += nameLength + valueLength;
}
/// <summary>
/// Best-effort write of a 4-byte unsigned value at the given offset,
/// growing the backing store as needed. Returns false instead of
/// propagating any failure.
/// </summary>
public bool SetBigInt32(uint value, int offset)
{
    try
    {
        int requiredSize = offset + 4;
        EnsureCapacity(requiredSize);
        ByteUtil.IntToBytes(value, bytes, offset);
        ResetSize(requiredSize);
        return true;
    }
    catch (Exception)
    {
        // Deliberate best-effort contract: signal failure via return value.
        return false;
    }
}
/// <summary>
/// Serialize this node: 2-byte type, 4-byte length (always 8), then the
/// 8-byte value. Returns the offset past the written bytes.
/// </summary>
public override int Write(byte[] buf, int offset)
{
    offset += ByteUtil.ShortToBytes(type, buf, offset);
    offset += ByteUtil.IntToBytes(8, buf, offset);
    offset += ByteUtil.LongToBytes((ulong)value, buf, offset);
    return offset;
}
/// <summary>
/// Serialize this node: 2-byte op code, 4-byte length (always 4), then the
/// 4-byte flags value. Returns the offset past the written bytes.
/// </summary>
public override int Write(byte[] buf, int offset)
{
    offset += ByteUtil.ShortToBytes(op, buf, offset);
    offset += ByteUtil.IntToBytes(4, buf, offset);
    offset += ByteUtil.IntToBytes(flags, buf, offset);
    return offset;
}
/// <summary>
/// Generic header write for read-style commands (read-mode variant).
/// Maps the strong-consistency and availability read modes onto attribute
/// bits, then writes attribute bytes, timeout, field count and operation
/// count. The leading total-size field must be written last.
/// </summary>
private void WriteHeader(Policy policy, int readAttr, int writeAttr, int fieldCount, int operationCount)
{
    int infoAttr = 0;

    // Strong-consistency read type is encoded in two INFO3 bits.
    switch (policy.readModeSC)
    {
    case ReadModeSC.SESSION:
        break;
    case ReadModeSC.LINEARIZE:
        infoAttr |= Command.INFO3_SC_READ_TYPE;
        break;
    case ReadModeSC.ALLOW_REPLICA:
        infoAttr |= Command.INFO3_SC_READ_RELAX;
        break;
    case ReadModeSC.ALLOW_UNAVAILABLE:
        infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX;
        break;
    }

    if (policy.readModeAP == ReadModeAP.ALL)
    {
        readAttr |= Command.INFO1_READ_MODE_AP_ALL;
    }

    // Reserve 8 bytes at the front of the message for the size field written later.
    dataOffset += 8;

    // Write all header data except total size which must be written last.
    dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length.
    dataBuffer[dataOffset++] = (byte)readAttr;
    dataBuffer[dataOffset++] = (byte)writeAttr;
    dataBuffer[dataOffset++] = (byte)infoAttr;

    // Zero the next 10 header bytes — unused for this command type.
    for (int i = 0; i < 10; i++)
    {
        dataBuffer[dataOffset++] = 0;
    }
    dataOffset += ByteUtil.IntToBytes((uint)policy.totalTimeout, dataBuffer, dataOffset);
    dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
    dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
}
/// <summary>
/// Serialize this value node: 2-byte type, 4-byte length, then a 1-byte
/// flags field, a 2-byte cell count (both zero) and the UTF-8 string.
/// NOTE(review): this layout looks like a geo/GeoJSON particle
/// (flags + ncells + json) — confirm against the server particle format.
/// Returns the offset past the written bytes.
/// </summary>
public override int Write(byte[] buf, int offset)
{
    // Write value type
    ByteUtil.ShortToBytes(type, buf, offset);
    offset += 2;

    // Write value: encode the string past the 4-byte length, 1-byte flags and
    // 2-byte cell count that precede it, then back-fill the length field.
    int len = ByteUtil.StringToUtf8(value, buf, offset + 4 + 1 + 2);
    ByteUtil.IntToBytes((uint)(len + 1 + 2), buf, offset);
    offset += 4;

    buf[offset] = 0; // flags
    offset += 1;
    ByteUtil.ShortToBytes(0, buf, offset); // ncells
    offset += 2;

    offset += len;
    return(offset);
}
/// <summary>
/// Write a 4-byte integer field: field header first, then the value.
/// </summary>
private void WriteField(int val, int type)
{
    WriteFieldHeader(4, type);
    ByteUtil.IntToBytes((uint)val, dataBuffer, dataOffset);
    dataOffset += 4;
}
/// <summary>
/// Header write for write operations (fixed-offset variant).
/// Maps record-exists, generation and commit-level policies onto attribute
/// bits, then writes the header at absolute buffer offsets 8-29.
/// The timeout bytes (22-25) are zeroed here and filled in later; the total
/// size (bytes 0-7) must be written last. Leaves dataOffset at
/// MSG_TOTAL_HEADER_SIZE ready for fields/operations.
/// </summary>
protected internal void WriteHeader(WritePolicy policy, int readAttr, int writeAttr, int fieldCount, int operationCount)
{
    // Set flags.
    int generation = 0;
    int infoAttr = 0;

    // Map record-exists action onto create/replace/update attribute bits.
    switch (policy.recordExistsAction)
    {
    case RecordExistsAction.UPDATE:
        break;
    case RecordExistsAction.UPDATE_ONLY:
        infoAttr |= Command.INFO3_UPDATE_ONLY;
        break;
    case RecordExistsAction.REPLACE:
        infoAttr |= Command.INFO3_CREATE_OR_REPLACE;
        break;
    case RecordExistsAction.REPLACE_ONLY:
        infoAttr |= Command.INFO3_REPLACE_ONLY;
        break;
    case RecordExistsAction.CREATE_ONLY:
        writeAttr |= Command.INFO2_CREATE_ONLY;
        break;
    }

    // Optimistic concurrency: only send a generation when the policy asks for it.
    switch (policy.generationPolicy)
    {
    case GenerationPolicy.NONE:
        break;
    case GenerationPolicy.EXPECT_GEN_EQUAL:
        generation = policy.generation;
        writeAttr |= Command.INFO2_GENERATION;
        break;
    case GenerationPolicy.EXPECT_GEN_GT:
        generation = policy.generation;
        writeAttr |= Command.INFO2_GENERATION_GT;
        break;
    }

    if (policy.commitLevel == CommitLevel.COMMIT_MASTER)
    {
        infoAttr |= Command.INFO3_COMMIT_MASTER;
    }

    if (policy.consistencyLevel == ConsistencyLevel.CONSISTENCY_ALL)
    {
        readAttr |= Command.INFO1_CONSISTENCY_ALL;
    }

    // Write all header data except total size which must be written last.
    dataBuffer[8] = MSG_REMAINING_HEADER_SIZE; // Message header length.
    dataBuffer[9] = (byte)readAttr;
    dataBuffer[10] = (byte)writeAttr;
    dataBuffer[11] = (byte)infoAttr;
    dataBuffer[12] = 0; // unused
    dataBuffer[13] = 0; // clear the result code
    ByteUtil.IntToBytes((uint)generation, dataBuffer, 14);
    ByteUtil.IntToBytes((uint)policy.expiration, dataBuffer, 18);

    // Initialize timeout. It will be written later.
    dataBuffer[22] = 0;
    dataBuffer[23] = 0;
    dataBuffer[24] = 0;
    dataBuffer[25] = 0;
    ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, 26);
    ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, 28);
    dataOffset = MSG_TOTAL_HEADER_SIZE;
}
/// <summary>
/// Header write for write operations (offset-relative variant).
/// Maps record-exists, generation, commit-level, linearize-read, consistency
/// and durable-delete policies onto attribute bits, then writes the header
/// sequentially from the current dataOffset. The leading total-size field
/// must be written last.
/// </summary>
private void WriteHeader(WritePolicy policy, int readAttr, int writeAttr, int fieldCount, int operationCount)
{
    // Set flags.
    int generation = 0;
    int infoAttr = 0;

    // Map record-exists action onto create/replace/update attribute bits.
    switch (policy.recordExistsAction)
    {
    case RecordExistsAction.UPDATE:
        break;
    case RecordExistsAction.UPDATE_ONLY:
        infoAttr |= Command.INFO3_UPDATE_ONLY;
        break;
    case RecordExistsAction.REPLACE:
        infoAttr |= Command.INFO3_CREATE_OR_REPLACE;
        break;
    case RecordExistsAction.REPLACE_ONLY:
        infoAttr |= Command.INFO3_REPLACE_ONLY;
        break;
    case RecordExistsAction.CREATE_ONLY:
        writeAttr |= Command.INFO2_CREATE_ONLY;
        break;
    }

    // Optimistic concurrency: only send a generation when the policy asks for it.
    switch (policy.generationPolicy)
    {
    case GenerationPolicy.NONE:
        break;
    case GenerationPolicy.EXPECT_GEN_EQUAL:
        generation = policy.generation;
        writeAttr |= Command.INFO2_GENERATION;
        break;
    case GenerationPolicy.EXPECT_GEN_GT:
        generation = policy.generation;
        writeAttr |= Command.INFO2_GENERATION_GT;
        break;
    }

    if (policy.commitLevel == CommitLevel.COMMIT_MASTER)
    {
        infoAttr |= Command.INFO3_COMMIT_MASTER;
    }

    if (policy.linearizeRead)
    {
        infoAttr |= Command.INFO3_LINEARIZE_READ;
    }

    if (policy.consistencyLevel == ConsistencyLevel.CONSISTENCY_ALL)
    {
        readAttr |= Command.INFO1_CONSISTENCY_ALL;
    }

    if (policy.durableDelete)
    {
        writeAttr |= Command.INFO2_DURABLE_DELETE;
    }

    // Reserve 8 bytes at the front of the message for the size field written later.
    dataOffset += 8;

    // Write all header data except total size which must be written last.
    dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length.
    dataBuffer[dataOffset++] = (byte)readAttr;
    dataBuffer[dataOffset++] = (byte)writeAttr;
    dataBuffer[dataOffset++] = (byte)infoAttr;
    dataBuffer[dataOffset++] = 0; // unused
    dataBuffer[dataOffset++] = 0; // clear the result code
    dataOffset += ByteUtil.IntToBytes((uint)generation, dataBuffer, dataOffset);
    dataOffset += ByteUtil.IntToBytes((uint)policy.expiration, dataBuffer, dataOffset);
    dataOffset += ByteUtil.IntToBytes((uint)policy.totalTimeout, dataBuffer, dataOffset);
    dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
    dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
}
/// <summary>
/// Build a scan command. Two-pass: first estimate field/operation sizes to
/// size the buffer, then write the header, fields (namespace, set, scan
/// options, scan timeout, task id) and per-bin read operations.
/// </summary>
public void SetScan(ScanPolicy policy, string ns, string setName, string[] binNames, ulong taskId)
{
    Begin();
    int fieldCount = 0;

    if (ns != null)
    {
        dataOffset += ByteUtil.EstimateSizeUtf8(ns) + FIELD_HEADER_SIZE;
        fieldCount++;
    }

    if (setName != null)
    {
        dataOffset += ByteUtil.EstimateSizeUtf8(setName) + FIELD_HEADER_SIZE;
        fieldCount++;
    }

    // Estimate scan options size.
    dataOffset += 2 + FIELD_HEADER_SIZE;
    fieldCount++;

    // Estimate scan timeout size.
    dataOffset += 4 + FIELD_HEADER_SIZE;
    fieldCount++;

    // Estimate taskId size.
    dataOffset += 8 + FIELD_HEADER_SIZE;
    fieldCount++;

    if (binNames != null)
    {
        foreach (String binName in binNames)
        {
            EstimateOperationSize(binName);
        }
    }

    SizeBuffer();
    byte readAttr = (byte)Command.INFO1_READ;

    if (!policy.includeBinData)
    {
        readAttr |= (byte)Command.INFO1_NOBINDATA;
    }

    int operationCount = (binNames == null) ? 0 : binNames.Length;
    WriteHeader(policy, readAttr, 0, fieldCount, operationCount);

    if (ns != null)
    {
        WriteField(ns, FieldType.NAMESPACE);
    }

    if (setName != null)
    {
        WriteField(setName, FieldType.TABLE);
    }

    // Scan options: priority in the high nibble of byte 1, fail-on-cluster-change
    // as bit 0x08, scan percent in byte 2.
    WriteFieldHeader(2, FieldType.SCAN_OPTIONS);
    byte priority = (byte)policy.priority;
    priority <<= 4;

    if (policy.failOnClusterChange)
    {
        priority |= 0x08;
    }

    dataBuffer[dataOffset++] = priority;
    dataBuffer[dataOffset++] = (byte)policy.scanPercent;

    // Write scan timeout
    WriteFieldHeader(4, FieldType.SCAN_TIMEOUT);
    dataOffset += ByteUtil.IntToBytes((uint)policy.socketTimeout, dataBuffer, dataOffset);

    // Write taskId field
    WriteFieldHeader(8, FieldType.TRAN_ID);
    dataOffset += ByteUtil.LongToBytes(taskId, dataBuffer, dataOffset);

    if (binNames != null)
    {
        foreach (String binName in binNames)
        {
            WriteOperation(binName, Operation.Type.READ);
        }
    }
    End();
}
/// <summary>
/// Build a batch-index read command for a list of BatchRead records.
/// Two-pass: first estimate each row's size (rows identical to the previous
/// row are encoded as a single "repeat" byte), then write the header, a
/// batch-index field whose size is back-filled at the end, and one row per
/// record.
/// </summary>
public void SetBatchRead(BatchPolicy policy, List<BatchRead> records, BatchNode batch)
{
    // Estimate full row size
    int[] offsets = batch.offsets;
    int max = batch.offsetsSize;
    ushort fieldCount = policy.sendSetName ? (ushort)2 : (ushort)1;
    BatchRead prev = null;

    Begin();
    // Batch-index field header + 4-byte record count + 1-byte allowInline flag.
    dataOffset += FIELD_HEADER_SIZE + 5;

    for (int i = 0; i < max; i++)
    {
        BatchRead record = records[offsets[i]];
        Key key = record.key;
        string[] binNames = record.binNames;

        // 4-byte batch index + digest per row.
        dataOffset += key.digest.Length + 4;

        // Avoid relatively expensive full equality checks for performance reasons.
        // Use reference equality only in hope that common namespaces/bin names are set from
        // fixed variables. It's fine if equality not determined correctly because it just
        // results in more space used. The batch will still be correct.
        if (prev != null && prev.key.ns == key.ns && (!policy.sendSetName || prev.key.setName == key.setName) && prev.binNames == binNames && prev.readAllBins == record.readAllBins)
        {
            // Can set repeat previous namespace/bin names to save space.
            dataOffset++;
        }
        else
        {
            // Estimate full header, namespace and bin names.
            dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6;

            if (policy.sendSetName)
            {
                dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
            }

            if (binNames != null)
            {
                foreach (string binName in binNames)
                {
                    EstimateOperationSize(binName);
                }
            }
            prev = record;
        }
    }

    SizeBuffer();

    int readAttr = Command.INFO1_READ;

    if (policy.consistencyLevel == ConsistencyLevel.CONSISTENCY_ALL)
    {
        readAttr |= Command.INFO1_CONSISTENCY_ALL;
    }

    WriteHeader(policy, readAttr | Command.INFO1_BATCH, 0, 1, 0);
    int fieldSizeOffset = dataOffset;
    WriteFieldHeader(0, policy.sendSetName ? FieldType.BATCH_INDEX_WITH_SET : FieldType.BATCH_INDEX); // Need to update size at end

    ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
    dataOffset += 4;
    dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0;
    prev = null;

    for (int i = 0; i < max; i++)
    {
        int index = offsets[i];
        ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset);
        dataOffset += 4;

        BatchRead record = records[index];
        Key key = record.key;
        string[] binNames = record.binNames;
        byte[] digest = key.digest;
        Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
        dataOffset += digest.Length;

        // Avoid relatively expensive full equality checks for performance reasons.
        // Use reference equality only in hope that common namespaces/bin names are set from
        // fixed variables. It's fine if equality not determined correctly because it just
        // results in more space used. The batch will still be correct.
        if (prev != null && prev.key.ns == key.ns && (!policy.sendSetName || prev.key.setName == key.setName) && prev.binNames == binNames && prev.readAllBins == record.readAllBins)
        {
            // Can set repeat previous namespace/bin names to save space.
            dataBuffer[dataOffset++] = 1; // repeat
        }
        else
        {
            // Write full header, namespace and bin names.
            dataBuffer[dataOffset++] = 0; // do not repeat

            if (binNames != null && binNames.Length != 0)
            {
                // Specific bins requested: read attr + per-bin read operations.
                dataBuffer[dataOffset++] = (byte)readAttr;
                dataOffset += ByteUtil.ShortToBytes(fieldCount, dataBuffer, dataOffset);
                dataOffset += ByteUtil.ShortToBytes((ushort)binNames.Length, dataBuffer, dataOffset);
                WriteField(key.ns, FieldType.NAMESPACE);

                if (policy.sendSetName)
                {
                    WriteField(key.setName, FieldType.TABLE);
                }

                foreach (string binName in binNames)
                {
                    WriteOperation(binName, Operation.Type.READ);
                }
            }
            else
            {
                // No bin list: either read all bins or just check existence.
                dataBuffer[dataOffset++] = (byte)(readAttr | (record.readAllBins ? Command.INFO1_GET_ALL : Command.INFO1_NOBINDATA));
                dataOffset += ByteUtil.ShortToBytes(fieldCount, dataBuffer, dataOffset);
                dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset);
                WriteField(key.ns, FieldType.NAMESPACE);

                if (policy.sendSetName)
                {
                    WriteField(key.setName, FieldType.TABLE);
                }
            }
            prev = record;
        }
    }

    // Write real field size.
    ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
    End();
}
/// <summary>
/// Write a field header: 4-byte size followed by a 1-byte field type.
/// The size on the wire includes the type byte itself.
/// </summary>
private void WriteFieldHeader(int size, int type)
{
    dataOffset += ByteUtil.IntToBytes((uint)size + 1, dataBuffer, dataOffset);
    dataBuffer[dataOffset++] = (byte)type;
}
/// <summary>
/// Execute the command with retry. Each iteration picks a node (per-key
/// partition routing when a key is given), sends the serialized buffer and
/// parses the response. Server timeouts and socket errors are retried up to
/// policy.maxRetries within the total-timeout deadline; other errors are
/// rethrown with in-doubt status set.
/// </summary>
public void Execute(Cluster cluster, Policy policy, Key key, Node node, bool isRead)
{
    Partition partition = (key != null)? new Partition(key) : null;
    AerospikeException exception = null;
    DateTime deadline = DateTime.MinValue;
    int socketTimeout = policy.socketTimeout;
    int totalTimeout = policy.totalTimeout;
    int iteration = 0;
    int commandSentCounter = 0;
    bool isClientTimeout;

    if (totalTimeout > 0)
    {
        deadline = DateTime.UtcNow.AddMilliseconds(totalTimeout);

        // Socket timeout must never exceed the overall deadline.
        if (socketTimeout == 0 || socketTimeout > totalTimeout)
        {
            socketTimeout = totalTimeout;
        }
    }

    // Execute command until successful, timed out or maximum iterations have been reached.
    while (true)
    {
        try
        {
            if (partition != null)
            {
                // Single record command node retrieval.
                node = GetNode(cluster, partition, policy.replica, isRead);
            }

            Connection conn = node.GetConnection(socketTimeout);

            try
            {
                // Set command buffer.
                WriteBuffer();

                // Check if timeout needs to be changed in send buffer.
                if (totalTimeout != policy.totalTimeout)
                {
                    // Reset timeout in send buffer (destined for server) and socket.
                    // Offset 22 is the timeout slot in the message header.
                    ByteUtil.IntToBytes((uint)totalTimeout, dataBuffer, 22);
                }

                // Send command.
                conn.Write(dataBuffer, dataOffset);
                commandSentCounter++;

                // Parse results.
                ParseResult(conn);

                // Put connection back in pool.
                node.PutConnection(conn);

                // Command has completed successfully. Exit method.
                return;
            }
            catch (AerospikeException ae)
            {
                if (ae.KeepConnection())
                {
                    // Put connection back in pool.
                    node.PutConnection(conn);
                }
                else
                {
                    // Close socket to flush out possible garbage. Do not put back in pool.
                    node.CloseConnection(conn);
                }

                if (ae.Result == ResultCode.TIMEOUT)
                {
                    // Go through retry logic on server timeout.
                    exception = new AerospikeException.Timeout(node, policy, iteration + 1, false);
                    isClientTimeout = false;

                    // Reads may fail over to another replica on retry.
                    if (isRead)
                    {
                        base.sequence++;
                    }
                }
                else
                {
                    ae.SetInDoubt(isRead, commandSentCounter);
                    throw;
                }
            }
            catch (SocketException se)
            {
                // Socket errors are considered temporary anomalies.
                // Retry after closing connection.
                node.CloseConnection(conn);

                if (se.SocketErrorCode == SocketError.TimedOut)
                {
                    isClientTimeout = true;

                    if (isRead)
                    {
                        base.sequence++;
                    }
                }
                else
                {
                    exception = new AerospikeException(se);
                    isClientTimeout = false;
                    base.sequence++;
                }
            }
            catch (Exception)
            {
                // All other exceptions are considered fatal. Do not retry.
                // Close socket to flush out possible garbage. Do not put back in pool.
                node.CloseConnection(conn);
                throw;
            }
        }
        catch (AerospikeException.Connection ce)
        {
            // Socket connection error has occurred. Retry.
            exception = ce;
            isClientTimeout = false;
            base.sequence++;
        }

        // Check maxRetries.
        if (++iteration > policy.maxRetries)
        {
            break;
        }

        if (policy.totalTimeout > 0)
        {
            // Check for total timeout.
            long remaining = (long)deadline.Subtract(DateTime.UtcNow).TotalMilliseconds - policy.sleepBetweenRetries;

            if (remaining <= 0)
            {
                break;
            }

            // Shrink timeouts so the next attempt cannot outlive the deadline.
            if (remaining < totalTimeout)
            {
                totalTimeout = (int)remaining;

                if (socketTimeout > totalTimeout)
                {
                    socketTimeout = totalTimeout;
                }
            }
        }

        if (!isClientTimeout && policy.sleepBetweenRetries > 0)
        {
            // Sleep before trying again.
            Util.Sleep(policy.sleepBetweenRetries);
        }
    }

    // Retries have been exhausted. Throw last exception.
    if (isClientTimeout)
    {
        exception = new AerospikeException.Timeout(node, policy, iteration, true);
    }
    exception.SetInDoubt(isRead, commandSentCounter);
    throw exception;
}
/// <summary>
/// Build a batch-index read command for an array of keys with a shared bin
/// name list. Two-pass: first estimate each row's size (rows whose
/// namespace/set match the previous row are encoded as a single "repeat"
/// byte), then write the header, a batch-index field whose size is
/// back-filled at the end, and one row per key.
/// </summary>
public void SetBatchRead(BatchPolicy policy, Key[] keys, BatchNode batch, string[] binNames, int readAttr)
{
    // Estimate full row size
    int[] offsets = batch.offsets;
    int max = batch.offsetsSize;
    ushort fieldCount = policy.sendSetName ? (ushort)2 : (ushort)1;

    // Calculate size of bin names.
    int binNameSize = 0;
    int operationCount = 0;

    if (binNames != null)
    {
        foreach (string binName in binNames)
        {
            binNameSize += ByteUtil.EstimateSizeUtf8(binName) + OPERATION_HEADER_SIZE;
        }
        operationCount = binNames.Length;
    }

    // Estimate buffer size.
    Begin();
    // Batch-index field header + 4-byte record count + 1-byte allowInline flag.
    dataOffset += FIELD_HEADER_SIZE + 5;

    Key prev = null;

    for (int i = 0; i < max; i++)
    {
        Key key = keys[offsets[i]];

        // 4-byte batch index + digest per row.
        dataOffset += key.digest.Length + 4;

        // Try reference equality in hope that namespace for all keys is set from a fixed variable.
        if (prev != null && prev.ns == key.ns && (!policy.sendSetName || prev.setName == key.setName))
        {
            // Can set repeat previous namespace/bin names to save space.
            dataOffset++;
        }
        else
        {
            // Estimate full header, namespace and bin names.
            dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6;

            if (policy.sendSetName)
            {
                dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
            }
            dataOffset += binNameSize;
            prev = key;
        }
    }
    SizeBuffer();

    if (policy.consistencyLevel == ConsistencyLevel.CONSISTENCY_ALL)
    {
        readAttr |= Command.INFO1_CONSISTENCY_ALL;
    }

    WriteHeader(policy, readAttr | Command.INFO1_BATCH, 0, 1, 0);
    int fieldSizeOffset = dataOffset;
    WriteFieldHeader(0, policy.sendSetName ? FieldType.BATCH_INDEX_WITH_SET : FieldType.BATCH_INDEX); // Need to update size at end

    ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
    dataOffset += 4;
    dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0;
    prev = null;

    for (int i = 0; i < max; i++)
    {
        int index = offsets[i];
        ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset);
        dataOffset += 4;

        Key key = keys[index];
        byte[] digest = key.digest;
        Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
        dataOffset += digest.Length;

        // Try reference equality in hope that namespace for all keys is set from a fixed variable.
        if (prev != null && prev.ns == key.ns && (!policy.sendSetName || prev.setName == key.setName))
        {
            // Can set repeat previous namespace/bin names to save space.
            dataBuffer[dataOffset++] = 1; // repeat
        }
        else
        {
            // Write full header, namespace and bin names.
            dataBuffer[dataOffset++] = 0; // do not repeat
            dataBuffer[dataOffset++] = (byte)readAttr;
            dataOffset += ByteUtil.ShortToBytes(fieldCount, dataBuffer, dataOffset);
            dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
            WriteField(key.ns, FieldType.NAMESPACE);

            if (policy.sendSetName)
            {
                WriteField(key.setName, FieldType.TABLE);
            }

            if (binNames != null)
            {
                foreach (string binName in binNames)
                {
                    WriteOperation(binName, Operation.Type.READ);
                }
            }
            prev = key;
        }
    }

    // Write real field size.
    ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
    End();
}
/// <summary>
/// Execute the command with retry (legacy timeout model). Each iteration
/// picks a node, sends the serialized buffer and parses the response.
/// Node-selection and connection failures are retried up to
/// policy.maxRetries within policy.timeout; socket timeouts and other
/// exceptions are rethrown immediately.
/// </summary>
public void Execute()
{
    Policy policy = GetPolicy();
    int remainingMillis = policy.timeout;
    DateTime limit = DateTime.UtcNow.AddMilliseconds(remainingMillis);
    Node node = null;
    Exception exception = null;
    int failedNodes = 0;
    int failedConns = 0;
    int iterations = 0;

    dataBuffer = ThreadLocalData.GetBuffer();

    // Execute command until successful, timed out or maximum iterations have been reached.
    while (true)
    {
        try
        {
            node = GetNode();
            Connection conn = node.GetConnection(remainingMillis);

            try
            {
                // Set command buffer.
                WriteBuffer();

                // Reset timeout in send buffer (destined for server) and socket.
                // Offset 22 is the timeout slot in the message header.
                ByteUtil.IntToBytes((uint)remainingMillis, dataBuffer, 22);

                // Send command.
                conn.Write(dataBuffer, dataOffset);

                // Parse results.
                ParseResult(conn);

                // Reflect healthy status.
                conn.UpdateLastUsed();

                // Put connection back in pool.
                node.PutConnection(conn);

                // Command has completed successfully. Exit method.
                return;
            }
            catch (AerospikeException ae)
            {
                if (ae.KeepConnection())
                {
                    // Put connection back in pool.
                    conn.UpdateLastUsed();
                    node.PutConnection(conn);
                }
                else
                {
                    // Close socket to flush out possible garbage. Do not put back in pool.
                    node.CloseConnection(conn);
                }
                throw;
            }
            catch (SocketException ioe)
            {
                node.CloseConnection(conn);

                if (ioe.ErrorCode == (int)SocketError.TimedOut)
                {
                    // Full timeout has been reached. Do not retry.
                    // Close socket to flush out possible garbage. Do not put back in pool.
                    throw new AerospikeException.Timeout(node, policy.timeout, ++iterations, failedNodes, failedConns);
                }
                else
                {
                    // IO errors are considered temporary anomalies. Retry.
                    // Close socket to flush out possible garbage. Do not put back in pool.
                    exception = ioe;
                }
            }
            catch (Exception)
            {
                // All runtime exceptions are considered fatal. Do not retry.
                // Close socket to flush out possible garbage. Do not put back in pool.
                node.CloseConnection(conn);
                throw;
            }
        }
        catch (AerospikeException.InvalidNode ine)
        {
            // Node is currently inactive. Retry.
            exception = ine;
            failedNodes++;
        }
        catch (AerospikeException.Connection ce)
        {
            // Socket connection error has occurred. Retry.
            exception = ce;
            failedConns++;
        }

        if (++iterations > policy.maxRetries)
        {
            break;
        }

        // Check for client timeout.
        if (policy.timeout > 0)
        {
            remainingMillis = (int)limit.Subtract(DateTime.UtcNow).TotalMilliseconds - policy.sleepBetweenRetries;

            if (remainingMillis <= 0)
            {
                break;
            }
        }

        if (policy.sleepBetweenRetries > 0)
        {
            // Sleep before trying again.
            Util.Sleep(policy.sleepBetweenRetries);
        }

        // Reset node reference and try again.
        node = null;
    }

    // Retries have been exhausted. Throw last exception.
    // (exception is always non-null here: every path to this point went
    // through a catch block that assigned it.)
    throw exception;
}
/// <summary>
/// Build a query command. Two-pass: first estimate sizes for all fields
/// (namespace, index, set, task id, filter/bin-list or scan options,
/// predicate expressions, UDF), then write the header and serialize the
/// fields. A statement without a filter is executed as a primary index
/// scan; in that case bin names are written as read operations after the
/// fields rather than as a bin-list field.
/// </summary>
protected internal void SetQuery(Policy policy, Statement statement, bool write)
{
    byte[] functionArgBuffer = null;
    int fieldCount = 0;
    int filterSize = 0;
    int binNameSize = 0;

    Begin();

    if (statement.ns != null)
    {
        dataOffset += ByteUtil.EstimateSizeUtf8(statement.ns) + FIELD_HEADER_SIZE;
        fieldCount++;
    }

    if (statement.indexName != null)
    {
        dataOffset += ByteUtil.EstimateSizeUtf8(statement.indexName) + FIELD_HEADER_SIZE;
        fieldCount++;
    }

    if (statement.setName != null)
    {
        dataOffset += ByteUtil.EstimateSizeUtf8(statement.setName) + FIELD_HEADER_SIZE;
        fieldCount++;
    }

    // Allocate space for TaskId field.
    dataOffset += 8 + FIELD_HEADER_SIZE;
    fieldCount++;

    if (statement.filter != null)
    {
        IndexCollectionType type = statement.filter.CollectionType;

        // Collection index type is only sent when it is not the default.
        if (type != IndexCollectionType.DEFAULT)
        {
            dataOffset += FIELD_HEADER_SIZE + 1;
            fieldCount++;
        }

        dataOffset += FIELD_HEADER_SIZE;
        filterSize++; // num filters
        filterSize += statement.filter.EstimateSize();
        dataOffset += filterSize;
        fieldCount++;

        // Query bin names are specified as a field (Scan bin names are specified later as operations)
        if (statement.binNames != null)
        {
            dataOffset += FIELD_HEADER_SIZE;
            binNameSize++; // num bin names

            foreach (string binName in statement.binNames)
            {
                binNameSize += ByteUtil.EstimateSizeUtf8(binName) + 1;
            }
            dataOffset += binNameSize;
            fieldCount++;
        }
    }
    else
    {
        // Calling query with no filters is more efficiently handled by a primary index scan.
        // Estimate scan options size.
        dataOffset += 2 + FIELD_HEADER_SIZE;
        fieldCount++;

        // Estimate scan timeout size.
        dataOffset += 4 + FIELD_HEADER_SIZE;
        fieldCount++;
    }

    PredExp[] predExp = statement.PredExp;
    int predSize = 0;

    if (predExp != null)
    {
        dataOffset += FIELD_HEADER_SIZE;
        predSize = PredExp.EstimateSize(predExp);
        dataOffset += predSize;
        fieldCount++;
    }

    if (statement.functionName != null)
    {
        dataOffset += FIELD_HEADER_SIZE + 1; // udf type
        dataOffset += ByteUtil.EstimateSizeUtf8(statement.packageName) + FIELD_HEADER_SIZE;
        dataOffset += ByteUtil.EstimateSizeUtf8(statement.functionName) + FIELD_HEADER_SIZE;

        if (statement.functionArgs.Length > 0)
        {
            functionArgBuffer = Packer.Pack(statement.functionArgs);
        }
        else
        {
            functionArgBuffer = new byte[0];
        }
        // UDF contributes 4 fields: op type, package, function, arg list.
        dataOffset += FIELD_HEADER_SIZE + functionArgBuffer.Length;
        fieldCount += 4;
    }

    // Scan path: bin names become read operations, estimated here.
    if (statement.filter == null)
    {
        if (statement.binNames != null)
        {
            foreach (string binName in statement.binNames)
            {
                EstimateOperationSize(binName);
            }
        }
    }

    SizeBuffer();

    int operationCount = (statement.filter == null && statement.binNames != null) ? statement.binNames.Length : 0;

    if (write)
    {
        WriteHeader((WritePolicy)policy, Command.INFO1_READ, Command.INFO2_WRITE, fieldCount, operationCount);
    }
    else
    {
        QueryPolicy qp = (QueryPolicy)policy;
        int readAttr = qp.includeBinData ? Command.INFO1_READ : Command.INFO1_READ | Command.INFO1_NOBINDATA;
        WriteHeader(policy, readAttr, 0, fieldCount, operationCount);
    }

    if (statement.ns != null)
    {
        WriteField(statement.ns, FieldType.NAMESPACE);
    }

    if (statement.indexName != null)
    {
        WriteField(statement.indexName, FieldType.INDEX_NAME);
    }

    if (statement.setName != null)
    {
        WriteField(statement.setName, FieldType.TABLE);
    }

    // Write taskId field
    WriteFieldHeader(8, FieldType.TRAN_ID);
    ByteUtil.LongToBytes(statement.taskId, dataBuffer, dataOffset);
    dataOffset += 8;

    if (statement.filter != null)
    {
        IndexCollectionType type = statement.filter.CollectionType;

        if (type != IndexCollectionType.DEFAULT)
        {
            WriteFieldHeader(1, FieldType.INDEX_TYPE);
            dataBuffer[dataOffset++] = (byte)type;
        }

        WriteFieldHeader(filterSize, FieldType.INDEX_RANGE);
        dataBuffer[dataOffset++] = (byte)1; // num filters
        dataOffset = statement.filter.Write(dataBuffer, dataOffset);

        // Query bin names are specified as a field (Scan bin names are specified later as operations)
        if (statement.binNames != null)
        {
            WriteFieldHeader(binNameSize, FieldType.QUERY_BINLIST);
            dataBuffer[dataOffset++] = (byte)statement.binNames.Length;

            foreach (string binName in statement.binNames)
            {
                // Each name is a 1-byte length followed by the UTF-8 bytes.
                int len = ByteUtil.StringToUtf8(binName, dataBuffer, dataOffset + 1);
                dataBuffer[dataOffset] = (byte)len;
                dataOffset += len + 1;
            }
        }
    }
    else
    {
        // Calling query with no filters is more efficiently handled by a primary index scan.
        WriteFieldHeader(2, FieldType.SCAN_OPTIONS);
        byte priority = (byte)policy.priority;
        priority <<= 4; // priority occupies the high nibble
        dataBuffer[dataOffset++] = priority;
        dataBuffer[dataOffset++] = (byte)100; // scan percent is always 100 for queries

        // Write scan timeout
        WriteFieldHeader(4, FieldType.SCAN_TIMEOUT);
        dataOffset += ByteUtil.IntToBytes((uint)policy.socketTimeout, dataBuffer, dataOffset);
    }

    if (predExp != null)
    {
        WriteFieldHeader(predSize, FieldType.PREDEXP);
        dataOffset = PredExp.Write(predExp, dataBuffer, dataOffset);
    }

    if (statement.functionName != null)
    {
        WriteFieldHeader(1, FieldType.UDF_OP);
        dataBuffer[dataOffset++] = (statement.returnData) ? (byte)1 : (byte)2;
        WriteField(statement.packageName, FieldType.UDF_PACKAGE_NAME);
        WriteField(statement.functionName, FieldType.UDF_FUNCTION);
        WriteField(functionArgBuffer, FieldType.UDF_ARGLIST);
    }

    // Scan bin names are specified after all fields.
    if (statement.filter == null)
    {
        if (statement.binNames != null)
        {
            foreach (string binName in statement.binNames)
            {
                WriteOperation(binName, Operation.Type.READ);
            }
        }
    }
    End();
}
/// <summary>
/// Execute the command with retry, resuming from the supplied iteration and
/// deadline state (used when a command restarts mid-retry). Server timeouts
/// and socket errors are retried within the deadline; on a non-timeout retry
/// a batch may be split into per-node sub-commands via RetryBatch. Fatal
/// errors are rethrown with node, iteration and in-doubt status set.
/// </summary>
public void Execute
(
    Cluster cluster,
    Policy policy,
    bool isRead,
    int socketTimeout,
    int totalTimeout,
    DateTime deadline,
    int iteration,
    int commandSentCounter
)
{
    Node node;
    AerospikeException exception = null;
    bool isClientTimeout;

    // Execute command until successful, timed out or maximum iterations have been reached.
    while (true)
    {
        try
        {
            node = GetNode(cluster);
        }
        catch (AerospikeException ae)
        {
            // Node selection failed; annotate and propagate without retry.
            ae.Iteration = iteration;
            ae.SetInDoubt(isRead, commandSentCounter);
            throw;
        }

        try
        {
            Connection conn = node.GetConnection(socketTimeout);

            try
            {
                // Set command buffer.
                WriteBuffer();

                // Check if timeout needs to be changed in send buffer.
                if (totalTimeout != policy.totalTimeout)
                {
                    // Reset timeout in send buffer (destined for server) and socket.
                    // Offset 22 is the timeout slot in the message header.
                    ByteUtil.IntToBytes((uint)totalTimeout, dataBuffer, 22);
                }

                // Send command.
                conn.Write(dataBuffer, dataOffset);
                commandSentCounter++;

                // Parse results.
                ParseResult(conn);

                // Put connection back in pool.
                node.PutConnection(conn);

                // Command has completed successfully. Exit method.
                return;
            }
            catch (AerospikeException ae)
            {
                if (ae.KeepConnection())
                {
                    // Put connection back in pool.
                    node.PutConnection(conn);
                }
                else
                {
                    // Close socket to flush out possible garbage. Do not put back in pool.
                    node.CloseConnection(conn);
                }

                if (ae.Result == ResultCode.TIMEOUT)
                {
                    // Go through retry logic on server timeout.
                    exception = new AerospikeException.Timeout(policy, false);
                    isClientTimeout = false;
                }
                else
                {
                    throw;
                }
            }
            catch (SocketException se)
            {
                // Socket errors are considered temporary anomalies.
                // Retry after closing connection.
                node.CloseConnection(conn);

                if (se.SocketErrorCode == SocketError.TimedOut)
                {
                    isClientTimeout = true;
                }
                else
                {
                    exception = new AerospikeException(se);
                    isClientTimeout = false;
                }
            }
            catch (Exception)
            {
                // All other exceptions are considered fatal. Do not retry.
                // Close socket to flush out possible garbage. Do not put back in pool.
                node.CloseConnection(conn);
                throw;
            }
        }
        catch (SocketException se)
        {
            // This exception might happen after initial connection succeeded, but
            // user login failed with a socket error. Retry.
            if (se.SocketErrorCode == SocketError.TimedOut)
            {
                isClientTimeout = true;
            }
            else
            {
                exception = new AerospikeException(se);
                isClientTimeout = false;
            }
        }
        catch (AerospikeException.Connection ce)
        {
            // Socket connection error has occurred. Retry.
            exception = ce;
            isClientTimeout = false;
        }
        catch (AerospikeException ae)
        {
            // Any other Aerospike error outside the send/parse path is fatal.
            ae.Node = node;
            ae.Iteration = iteration;
            ae.SetInDoubt(isRead, commandSentCounter);
            throw;
        }

        // Check maxRetries.
        if (iteration > policy.maxRetries)
        {
            break;
        }

        if (policy.totalTimeout > 0)
        {
            // Check for total timeout.
            long remaining = (long)deadline.Subtract(DateTime.UtcNow).TotalMilliseconds - policy.sleepBetweenRetries;

            if (remaining <= 0)
            {
                break;
            }

            // Shrink timeouts so the next attempt cannot outlive the deadline.
            if (remaining < totalTimeout)
            {
                totalTimeout = (int)remaining;

                if (socketTimeout > totalTimeout)
                {
                    socketTimeout = totalTimeout;
                }
            }
        }

        if (!isClientTimeout && policy.sleepBetweenRetries > 0)
        {
            // Sleep before trying again.
            Util.Sleep(policy.sleepBetweenRetries);
        }

        iteration++;

        if (!PrepareRetry(isClientTimeout || exception.Result == ResultCode.TIMEOUT))
        {
            // Batch may be retried in separate commands.
            if (RetryBatch(cluster, socketTimeout, totalTimeout, deadline, iteration, commandSentCounter))
            {
                // Batch was retried in separate commands. Complete this command.
                return;
            }
        }
    }

    // Retries have been exhausted. Throw last exception.
    if (isClientTimeout)
    {
        exception = new AerospikeException.Timeout(policy, true);
    }
    exception.Node = node;
    exception.Iteration = iteration;
    exception.SetInDoubt(isRead, commandSentCounter);
    throw exception;
}
/// <summary>
/// Write a field header: 4-byte size followed by a 1-byte field id.
/// The size on the wire includes the id byte itself.
/// </summary>
private void WriteFieldHeader(byte id, int size)
{
    dataOffset += ByteUtil.IntToBytes((uint)size + 1, dataBuffer, dataOffset);
    dataBuffer[dataOffset++] = id;
}
/// <summary>
/// Build a batch-index read command (older namespace-only variant).
/// Two-pass: first estimate each row's size using a conservative fixed
/// per-row upper bound (rows in the same namespace as the previous row are
/// estimated at the 25-byte repeat size), then write the header, a
/// batch-index field whose size is back-filled at the end, and one row per
/// key.
/// </summary>
public void SetBatchRead(BatchPolicy policy, Key[] keys, BatchNode batch, string[] binNames, int readAttr)
{
    // Estimate full row size
    int[] offsets = batch.offsets;
    int max = batch.offsetsSize;
    int rowSize = 30 + FIELD_HEADER_SIZE + 31; // Row's header(30) + max namespace(31).
    int operationCount = 0;

    if (binNames != null)
    {
        // NOTE(review): operation sizes are accumulated into dataOffset here,
        // BEFORE Begin() below — this assumes dataOffset is 0 on entry.
        // Confirm that callers always invoke this on a fresh command.
        foreach (string binName in binNames)
        {
            EstimateOperationSize(binName);
        }
        rowSize += dataOffset;
        operationCount = binNames.Length;
    }

    // Estimate buffer size.
    Begin();
    // Batch-index field header + 4-byte record count + 1-byte allowInline flag.
    dataOffset += FIELD_HEADER_SIZE + 5;

    string prevNamespace = null;

    for (int i = 0; i < max; i++)
    {
        Key key = keys[offsets[i]];

        // Try reference equality in hope that namespace for all keys is set from a fixed variable.
        if (key.ns == prevNamespace || (prevNamespace != null && prevNamespace.Equals(key.ns)))
        {
            // Can set repeat previous namespace/bin names to save space.
            dataOffset += 25;
        }
        else
        {
            // Must write full header and namespace/bin names.
            dataOffset += rowSize;
            prevNamespace = key.ns;
        }
    }

    SizeBuffer();

    WriteHeader(policy, readAttr | Command.INFO1_BATCH, 0, 1, 0);
    int fieldSizeOffset = dataOffset;
    WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end

    ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
    dataOffset += 4;
    dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0;
    prevNamespace = null;

    for (int i = 0; i < max; i++)
    {
        int index = offsets[i];
        ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset);
        dataOffset += 4;

        Key key = keys[index];
        byte[] digest = key.digest;
        Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
        dataOffset += digest.Length;

        // Try reference equality in hope that namespace for all keys is set from a fixed variable.
        if (key.ns == prevNamespace || (prevNamespace != null && prevNamespace.Equals(key.ns)))
        {
            // Can set repeat previous namespace/bin names to save space.
            dataBuffer[dataOffset++] = 1; // repeat
        }
        else
        {
            // Write full header, namespace and bin names.
            dataBuffer[dataOffset++] = 0; // do not repeat
            dataBuffer[dataOffset++] = (byte)readAttr;
            dataBuffer[dataOffset++] = 0; // pad
            dataBuffer[dataOffset++] = 0; // pad
            ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
            dataOffset += 2;
            WriteField(key.ns, FieldType.NAMESPACE);

            if (binNames != null)
            {
                foreach (string binName in binNames)
                {
                    WriteOperation(binName, Operation.Type.READ);
                }
            }
            prevNamespace = key.ns;
        }
    }

    // Write real field size.
    ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
    End();
}