/// <summary>
/// Return single value from response buffer.
/// </summary>
public string GetValue()
{
    SkipToValue();
    return(ByteUtil.Utf8ToString(buffer, offset, length - offset - 1));
}
/// <summary>
/// Parse all results in the batch. Add records to shared list.
/// If the record was not found, the bins will be null.
/// </summary>
protected internal override bool ParseRecordResults(int receiveSize)
{
    // Parse each message response and add it to the result array.
    dataOffset = 0;

    while (dataOffset < receiveSize)
    {
        ReadBytes(MSG_REMAINING_HEADER_SIZE);
        int resultCode = dataBuffer[5];

        // The only valid server return codes are "ok" and "not found".
        // If other return codes are received, then abort the batch.
        if (resultCode != 0 && resultCode != ResultCode.KEY_NOT_FOUND_ERROR)
        {
            throw new AerospikeException(resultCode);
        }

        byte info3 = dataBuffer[3];

        // If this is the end marker of the response, do not proceed further.
        if ((info3 & Command.INFO3_LAST) == Command.INFO3_LAST)
        {
            return(false);
        }

        int generation = ByteUtil.BytesToInt(dataBuffer, 6);
        int expiration = ByteUtil.BytesToInt(dataBuffer, 10);
        int fieldCount = ByteUtil.BytesToShort(dataBuffer, 18);
        int opCount = ByteUtil.BytesToShort(dataBuffer, 20);
        Key key = ParseKey(fieldCount);
        int offset = batch.offsets[index++];

        if (Util.ByteArrayEquals(key.digest, keys[offset].digest))
        {
            if (resultCode == 0)
            {
                records[offset] = ParseRecord(opCount, generation, expiration);
            }
        }
        else
        {
            throw new AerospikeException.Parse("Unexpected batch key returned: " + key.ns + ',' + ByteUtil.BytesToHexString(key.digest) + ',' + index + ',' + offset);
        }
    }
    return(true);
}
public object UnpackObject()
{
    int type = buffer[offset++];

    switch (type)
    {
        case 0xc0: // nil
        {
            return(null);
        }

        case 0xc3: // boolean true
        {
            return(true);
        }

        case 0xc2: // boolean false
        {
            return(false);
        }

        case 0xca: // float
        {
            float val = ByteUtil.BytesToFloat(buffer, offset);
            offset += 4;
            return(val);
        }

        case 0xcb: // double
        {
            double val = ByteUtil.BytesToDouble(buffer, offset);
            offset += 8;
            return(val);
        }

        case 0xd0: // signed 8 bit integer
        {
            return((long)(sbyte)(buffer[offset++]));
        }

        case 0xcc: // unsigned 8 bit integer
        {
            return((long)(buffer[offset++]));
        }

        case 0xd1: // signed 16 bit integer
        {
            int val = ByteUtil.BytesToShort(buffer, offset);
            offset += 2;
            return((long)(short)val);
        }

        case 0xcd: // unsigned 16 bit integer
        {
            int val = ByteUtil.BytesToShort(buffer, offset);
            offset += 2;
            return((long)val);
        }

        case 0xd2: // signed 32 bit integer
        {
            int val = ByteUtil.BytesToInt(buffer, offset);
            offset += 4;
            return((long)val);
        }

        case 0xce: // unsigned 32 bit integer
        {
            uint val = ByteUtil.BytesToUInt(buffer, offset);
            offset += 4;
            return((long)val);
        }

        case 0xd3: // signed 64 bit integer
        {
            long val = ByteUtil.BytesToLong(buffer, offset);
            offset += 8;
            return(val);
        }

        case 0xcf: // unsigned 64 bit integer
        {
            // The contract is to always return long.
            // The caller can always cast back to ulong.
            long val = ByteUtil.BytesToLong(buffer, offset);
            offset += 8;
            return(val);
        }

        case 0xc4:
        case 0xd9: // string/raw bytes with 8 bit header
        {
            int count = buffer[offset++];
            return(UnpackBlob(count));
        }

        case 0xc5:
        case 0xda: // raw bytes with 16 bit header
        {
            int count = ByteUtil.BytesToShort(buffer, offset);
            offset += 2;
            return(UnpackBlob(count));
        }

        case 0xc6:
        case 0xdb: // raw bytes with 32 bit header
        {
            // Array length is restricted to positive int values (0 - int.MaxValue).
            int count = ByteUtil.BytesToInt(buffer, offset);
            offset += 4;
            return(UnpackBlob(count));
        }

        case 0xdc: // list with 16 bit header
        {
            int count = ByteUtil.BytesToShort(buffer, offset);
            offset += 2;
            return(UnpackList(count));
        }

        case 0xdd: // list with 32 bit header
        {
            // List size is restricted to positive int values (0 - int.MaxValue).
            int count = ByteUtil.BytesToInt(buffer, offset);
            offset += 4;
            return(UnpackList(count));
        }

        case 0xde: // map with 16 bit header
        {
            int count = ByteUtil.BytesToShort(buffer, offset);
            offset += 2;
            return(UnpackMap(count));
        }

        case 0xdf: // map with 32 bit header
        {
            // Map size is restricted to positive int values (0 - int.MaxValue).
            int count = ByteUtil.BytesToInt(buffer, offset);
            offset += 4;
            return(UnpackMap(count));
        }

        case 0xd4: // Skip over type extension with 1 byte
        {
            offset += 1 + 1;
            return(null);
        }

        case 0xd5: // Skip over type extension with 2 bytes
        {
            offset += 1 + 2;
            return(null);
        }

        case 0xd6: // Skip over type extension with 4 bytes
        {
            offset += 1 + 4;
            return(null);
        }

        case 0xd7: // Skip over type extension with 8 bytes
        {
            offset += 1 + 8;
            return(null);
        }

        case 0xd8: // Skip over type extension with 16 bytes
        {
            offset += 1 + 16;
            return(null);
        }

        case 0xc7: // Skip over type extension with 8 bit header and bytes
        {
            int count = buffer[offset];
            offset += count + 1 + 1;
            return(null);
        }

        case 0xc8: // Skip over type extension with 16 bit header and bytes
        {
            int count = ByteUtil.BytesToShort(buffer, offset);
            offset += count + 1 + 2;
            return(null);
        }

        case 0xc9: // Skip over type extension with 32 bit header and bytes
        {
            int count = ByteUtil.BytesToInt(buffer, offset);
            offset += count + 1 + 4;
            return(null);
        }

        default:
        {
            if ((type & 0xe0) == 0xa0) // raw bytes with 8 bit combined header
            {
                return(UnpackBlob(type & 0x1f));
            }

            if ((type & 0xf0) == 0x80) // map with 8 bit combined header
            {
                return(UnpackMap(type & 0x0f));
            }

            if ((type & 0xf0) == 0x90) // list with 8 bit combined header
            {
                return(UnpackList(type & 0x0f));
            }

            if (type < 0x80) // 8 bit combined unsigned integer
            {
                return((long)type);
            }

            if (type >= 0xe0) // 8 bit combined signed integer
            {
                return((long)(type - 0xe0 - 32));
            }

            throw new IOException("Unknown unpack type: " + type);
        }
    }
}
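// Illustrative sketch (standalone demo, not part of the client): a minimal hand decode of
// two MessagePack headers handled by the switch above -- a positive fixint (< 0x80) and an
// int16 (0xd1, big-endian payload). Useful for checking the byte layout the unpacker expects.
public static class MsgPackSketch
{
    public static void Demo()
    {
        // 0x07 -> positive fixint 7; 0xd1 0xff 0x38 -> int16 -200.
        byte[] buf = { 0x07, 0xd1, 0xff, 0x38 };
        int offset = 0;

        long fixint = buf[offset++]; // a type byte below 0x80 is the value itself
        short int16 = (short)((buf[offset + 1] << 8) | buf[offset + 2]); // skip the 0xd1 marker byte
        System.Console.WriteLine(fixint + ", " + int16); // prints "7, -200"
    }
}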
protected internal override void ParseRow(Key key)
{
    if (opCount > 0)
    {
        throw new AerospikeException.Parse("Received bins that were not requested!");
    }

    if (Util.ByteArrayEquals(key.digest, keys[batchIndex].digest))
    {
        existsArray[batchIndex] = resultCode == 0;
    }
    else
    {
        throw new AerospikeException.Parse("Unexpected batch key returned: " + key.ns + ',' + ByteUtil.BytesToHexString(key.digest) + ',' + batchIndex);
    }
}
protected internal override void ParseRow(Key key)
{
    BatchRead record = records[batchIndex];

    if (Util.ByteArrayEquals(key.digest, record.key.digest))
    {
        if (resultCode == 0)
        {
            record.record = ParseRecord();
        }
    }
    else
    {
        throw new AerospikeException.Parse("Unexpected batch key returned: " + key.ns + ',' + ByteUtil.BytesToHexString(key.digest) + ',' + batchIndex);
    }
}
private void EstimateOperationSize(string binName)
{
    dataOffset += ByteUtil.EstimateSizeUtf8(binName) + OPERATION_HEADER_SIZE;
}
public void Execute
(
    Cluster cluster,
    Policy policy,
    Partition partition,
    Node node,
    bool isRead,
    int socketTimeout,
    int totalTimeout,
    DateTime deadline,
    int iteration,
    int commandSentCounter
)
{
    AerospikeException exception = null;
    bool isClientTimeout;

    // Execute command until successful, timed out or maximum iterations have been reached.
    while (true)
    {
        if (partition != null)
        {
            // Single record command node retrieval.
            try
            {
                node = GetNode(cluster, policy, partition, isRead);
            }
            catch (AerospikeException ae)
            {
                ae.Iteration = iteration;
                ae.SetInDoubt(isRead, commandSentCounter);
                throw;
            }
        }

        try
        {
            Connection conn = node.GetConnection(socketTimeout);

            try
            {
                // Set command buffer.
                WriteBuffer();

                // Check if timeout needs to be changed in send buffer.
                if (totalTimeout != policy.totalTimeout)
                {
                    // Reset timeout in send buffer (destined for server) and socket.
                    ByteUtil.IntToBytes((uint)totalTimeout, dataBuffer, 22);
                }

                // Send command.
                conn.Write(dataBuffer, dataOffset);
                commandSentCounter++;

                // Parse results.
                ParseResult(conn);

                // Put connection back in pool.
                node.PutConnection(conn);

                // Command has completed successfully. Exit method.
                return;
            }
            catch (AerospikeException ae)
            {
                if (ae.KeepConnection())
                {
                    // Put connection back in pool.
                    node.PutConnection(conn);
                }
                else
                {
                    // Close socket to flush out possible garbage. Do not put back in pool.
                    node.CloseConnection(conn);
                }

                if (ae.Result == ResultCode.TIMEOUT)
                {
                    // Go through retry logic on server timeout.
                    exception = new AerospikeException.Timeout(policy, false);
                    isClientTimeout = false;
                    ShiftSequenceOnRead(policy, isRead);
                }
                else
                {
                    throw;
                }
            }
            catch (SocketException se)
            {
                // Socket errors are considered temporary anomalies.
                // Retry after closing connection.
                node.CloseConnection(conn);

                if (se.SocketErrorCode == SocketError.TimedOut)
                {
                    isClientTimeout = true;
                    ShiftSequenceOnRead(policy, isRead);
                }
                else
                {
                    exception = new AerospikeException(se);
                    isClientTimeout = false;
                    base.sequence++;
                }
            }
            catch (Exception)
            {
                // All other exceptions are considered fatal. Do not retry.
                // Close socket to flush out possible garbage. Do not put back in pool.
                node.CloseConnection(conn);
                throw;
            }
        }
        catch (SocketException se)
        {
            // This exception might happen after initial connection succeeded, but
            // user login failed with a socket error. Retry.
            if (se.SocketErrorCode == SocketError.TimedOut)
            {
                isClientTimeout = true;
                ShiftSequenceOnRead(policy, isRead);
            }
            else
            {
                exception = new AerospikeException(se);
                isClientTimeout = false;
                base.sequence++;
            }
        }
        catch (AerospikeException.Connection ce)
        {
            // Socket connection error has occurred. Retry.
            exception = ce;
            isClientTimeout = false;
            base.sequence++;
        }
        catch (AerospikeException ae)
        {
            ae.Node = node;
            ae.Iteration = iteration;
            ae.SetInDoubt(isRead, commandSentCounter);
            throw;
        }

        // Check maxRetries.
        if (iteration > policy.maxRetries)
        {
            break;
        }

        if (policy.totalTimeout > 0)
        {
            // Check for total timeout.
            long remaining = (long)deadline.Subtract(DateTime.UtcNow).TotalMilliseconds - policy.sleepBetweenRetries;

            if (remaining <= 0)
            {
                break;
            }

            if (remaining < totalTimeout)
            {
                totalTimeout = (int)remaining;

                if (socketTimeout > totalTimeout)
                {
                    socketTimeout = totalTimeout;
                }
            }
        }

        if (!isClientTimeout && policy.sleepBetweenRetries > 0)
        {
            // Sleep before trying again.
            Util.Sleep(policy.sleepBetweenRetries);
        }

        iteration++;

        if (ShouldRetryBatch() && RetryBatch(cluster, socketTimeout, totalTimeout, deadline, iteration, commandSentCounter))
        {
            // Batch retried in separate commands. Complete this command.
            return;
        }
    }

    // Retries have been exhausted. Throw last exception.
    if (isClientTimeout)
    {
        exception = new AerospikeException.Timeout(policy, true);
    }
    exception.Node = node;
    exception.Iteration = iteration;
    exception.SetInDoubt(isRead, commandSentCounter);
    throw exception;
}
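// Illustrative sketch (standalone helper, not client code): the per-retry time budget
// arithmetic used in Execute above. The remaining budget is measured against the UTC
// deadline, reduced by the planned sleep, and the per-try socket timeout is clamped so
// it never exceeds what is left of the total timeout.
public static class RetryBudgetSketch
{
    public static void Clamp(System.DateTime deadline, int sleepBetweenRetries, ref int totalTimeout, ref int socketTimeout)
    {
        long remaining = (long)deadline.Subtract(System.DateTime.UtcNow).TotalMilliseconds - sleepBetweenRetries;

        if (remaining > 0 && remaining < totalTimeout)
        {
            totalTimeout = (int)remaining;

            if (socketTimeout > totalTimeout)
            {
                socketTimeout = totalTimeout;
            }
        }
    }
}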
private bool ValidateServerCertificate(object sender, X509Certificate cert, X509Chain chain, SslPolicyErrors sslPolicyErrors)
{
    // Exclude certificate serial numbers.
    if (policy.revokeCertificates != null)
    {
        byte[] serialNumber = cert.GetSerialNumber();

        foreach (byte[] sn in policy.revokeCertificates)
        {
            if (Util.ByteArrayEquals(serialNumber, sn))
            {
                if (Log.DebugEnabled())
                {
                    Log.Debug("Invalid certificate serial number: " + ByteUtil.BytesToHexString(serialNumber));
                }
                return(false);
            }
        }
    }

    if (sslPolicyErrors == SslPolicyErrors.None)
    {
        return(true);
    }

    // Search subject alternative names.
    var cert2 = (X509Certificate2)cert;

    foreach (X509Extension ext in cert2.Extensions)
    {
        if (ext.Oid.Value.Equals("2.5.29.17")) // Subject Alternative Name
        {
            const string filter = "DNS Name=";
            string sans = ext.Format(false);
            string san;
            int begin = 0;
            int end;

            while ((begin = sans.IndexOf(filter, begin)) >= 0)
            {
                begin += filter.Length;
                end = sans.IndexOf(',', begin);

                if (end >= 0)
                {
                    san = sans.Substring(begin, end - begin);
                }
                else
                {
                    san = sans.Substring(begin);
                }

                if (san.Equals(tlsName))
                {
                    return(true);
                }

                if (end < 0)
                {
                    break;
                }
                begin = end + 1;
            }
        }
    }

    if (Log.DebugEnabled())
    {
        Log.Debug("TLS connection error: " + sslPolicyErrors);
    }
    return(false);
}
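// Illustrative sketch (hypothetical caller, not this file's code): a validation callback
// with the signature above is normally attached through the standard .NET
// RemoteCertificateValidationCallback delegate accepted by SslStream's constructor.
public static class TlsSketch
{
    public static System.Net.Security.SslStream Wrap(
        System.Net.Sockets.NetworkStream stream,
        string tlsName,
        System.Net.Security.RemoteCertificateValidationCallback validator)
    {
        // leaveInnerStreamOpen = false; validator is invoked during the handshake.
        var ssl = new System.Net.Security.SslStream(stream, false, validator);
        ssl.AuthenticateAsClient(tlsName); // target host checked against the cert/SAN
        return ssl;
    }
}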
//-------------------------------------------------------
// 64 bit floating point conversions.
//-------------------------------------------------------

public static int DoubleToBytes(double v, byte[] buf, int offset)
{
    return(ByteUtil.LongToBytes((ulong)BitConverter.DoubleToInt64Bits(v), buf, offset));
}
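// Illustrative inverse (sketch): DoubleToBytes stores the IEEE-754 bit pattern through
// LongToBytes, so a matching decode (the role played by ByteUtil.BytesToDouble elsewhere
// in this codebase) is BytesToLong followed by BitConverter.Int64BitsToDouble.
public static double BytesToDoubleSketch(byte[] buf, int offset)
{
    return BitConverter.Int64BitsToDouble(ByteUtil.BytesToLong(buf, offset));
}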
public override int EstimateSize()
{
    return(ByteUtil.EstimateSizeUtf8(value) + 6);
}
public override int EstimateSize()
{
    // type + len + flags + ncells + jsonstr
    return(2 + 4 + 1 + 2 + ByteUtil.EstimateSizeUtf8(this.value));
}
/// <summary>
/// Header write for write operations.
/// </summary>
protected internal void WriteHeader(WritePolicy policy, int readAttr, int writeAttr, int fieldCount, int operationCount)
{
    // Set flags.
    int generation = 0;
    int infoAttr = 0;

    switch (policy.recordExistsAction)
    {
        case RecordExistsAction.UPDATE:
            break;
        case RecordExistsAction.UPDATE_ONLY:
            infoAttr |= Command.INFO3_UPDATE_ONLY;
            break;
        case RecordExistsAction.REPLACE:
            infoAttr |= Command.INFO3_CREATE_OR_REPLACE;
            break;
        case RecordExistsAction.REPLACE_ONLY:
            infoAttr |= Command.INFO3_REPLACE_ONLY;
            break;
        case RecordExistsAction.CREATE_ONLY:
            writeAttr |= Command.INFO2_CREATE_ONLY;
            break;
    }

    switch (policy.generationPolicy)
    {
        case GenerationPolicy.NONE:
            break;
        case GenerationPolicy.EXPECT_GEN_EQUAL:
            generation = policy.generation;
            writeAttr |= Command.INFO2_GENERATION;
            break;
        case GenerationPolicy.EXPECT_GEN_GT:
            generation = policy.generation;
            writeAttr |= Command.INFO2_GENERATION_GT;
            break;
    }

    if (policy.commitLevel == CommitLevel.COMMIT_MASTER)
    {
        infoAttr |= Command.INFO3_COMMIT_MASTER;
    }

    if (policy.consistencyLevel == ConsistencyLevel.CONSISTENCY_ALL)
    {
        readAttr |= Command.INFO1_CONSISTENCY_ALL;
    }

    // Write all header data except total size which must be written last.
    dataBuffer[8] = MSG_REMAINING_HEADER_SIZE; // Message header length.
    dataBuffer[9] = (byte)readAttr;
    dataBuffer[10] = (byte)writeAttr;
    dataBuffer[11] = (byte)infoAttr;
    dataBuffer[12] = 0; // unused
    dataBuffer[13] = 0; // clear the result code
    ByteUtil.IntToBytes((uint)generation, dataBuffer, 14);
    ByteUtil.IntToBytes((uint)policy.expiration, dataBuffer, 18);

    // Initialize timeout. It will be written later.
    dataBuffer[22] = 0;
    dataBuffer[23] = 0;
    dataBuffer[24] = 0;
    dataBuffer[25] = 0;

    ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, 26);
    ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, 28);
    dataOffset = MSG_TOTAL_HEADER_SIZE;
}
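// Worked example (derived entirely from the switches above): a WritePolicy with
// RecordExistsAction.CREATE_ONLY and GenerationPolicy.EXPECT_GEN_EQUAL yields
//
//     writeAttr |= INFO2_CREATE_ONLY | INFO2_GENERATION
//
// with policy.generation copied into header bytes 14-17, while infoAttr stays 0
// unless CommitLevel.COMMIT_MASTER is also requested.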
/// <summary>
/// Get name.
/// </summary>
public string GetName()
{
    int len = nameEnd - nameBegin;
    return(ByteUtil.Utf8ToString(parent.buffer, nameBegin, len));
}
public string GetTruncatedResponse()
{
    int max = (length > 200) ? 200 : length;
    return(ByteUtil.Utf8ToString(buffer, 0, max));
}
private void EstimateOperationSize(Bin bin)
{
    dataOffset += ByteUtil.EstimateSizeUtf8(bin.name) + OPERATION_HEADER_SIZE;
    dataOffset += bin.value.EstimateSize();
}
protected internal override bool ParseRecordResults(int receiveSize)
{
    // Read/parse remaining message bytes one record at a time.
    dataOffset = 0;

    while (dataOffset < receiveSize)
    {
        ReadBytes(MSG_REMAINING_HEADER_SIZE);
        int resultCode = dataBuffer[5];

        if (resultCode != 0)
        {
            if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR)
            {
                return(false);
            }
            throw new AerospikeException(resultCode);
        }

        byte info3 = dataBuffer[3];

        // If this is the end marker of the response, do not proceed further.
        if ((info3 & Command.INFO3_LAST) == Command.INFO3_LAST)
        {
            return(false);
        }

        int generation = ByteUtil.BytesToInt(dataBuffer, 6);
        int expiration = ByteUtil.BytesToInt(dataBuffer, 10);
        int fieldCount = ByteUtil.BytesToShort(dataBuffer, 18);
        int opCount = ByteUtil.BytesToShort(dataBuffer, 20);
        Key key = ParseKey(fieldCount);

        // Parse bins.
        Dictionary<string, object> bins = null;

        for (int i = 0; i < opCount; i++)
        {
            ReadBytes(8);
            int opSize = ByteUtil.BytesToInt(dataBuffer, 0);
            byte particleType = dataBuffer[5];
            byte nameSize = dataBuffer[7];

            ReadBytes(nameSize);
            string name = ByteUtil.Utf8ToString(dataBuffer, 0, nameSize);

            int particleBytesSize = (int)(opSize - (4 + nameSize));
            ReadBytes(particleBytesSize);
            object value = ByteUtil.BytesToParticle(particleType, dataBuffer, 0, particleBytesSize);

            if (bins == null)
            {
                bins = new Dictionary<string, object>();
            }
            bins[name] = value;
        }

        Record record = new Record(bins, generation, expiration);

        if (!valid)
        {
            throw new AerospikeException.QueryTerminated();
        }

        if (!recordSet.Put(new KeyRecord(key, record)))
        {
            Stop();
            throw new AerospikeException.QueryTerminated();
        }
    }
    return(true);
}
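// Operation wire layout consumed by the loop above (sketch derived from the reads; the
// meanings of bytes 4 and 6 are inferred, as the loop never inspects them):
//
//     [ 4-byte op size ][ 1 byte op type ][ 1 byte particle type ][ 1 byte (not inspected) ]
//     [ 1 byte bin name length ][ bin name ][ particle = op size - (4 + name length) bytes ]
//
// The op size counts everything after its own 4-byte field, which is why 4 + nameSize
// is subtracted to get the particle length.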
private void EstimateOperationSize(Operation operation)
{
    dataOffset += ByteUtil.EstimateSizeUtf8(operation.binName) + OPERATION_HEADER_SIZE;
    dataOffset += operation.value.EstimateSize();
}
private void ReceiveEvent()
{
    //Log.Info("Receive Event: " + eventArgs.BytesTransferred + "," + dataOffset + "," + dataLength + "," + inHeader);

    if (usingSocketTimeout)
    {
        eventReceived = true;
    }

    if (eventArgs.BytesTransferred <= 0)
    {
        ConnectionFailed(new AerospikeException.Connection("Connection closed"));
        return;
    }

    dataOffset += eventArgs.BytesTransferred;

    if (dataOffset < dataLength)
    {
        eventArgs.SetBuffer(dataOffset, dataLength - dataOffset);
        Receive();
        return;
    }
    dataOffset = segment.offset;

    if (inHeader)
    {
        int length = (int)(ByteUtil.BytesToLong(dataBuffer, dataOffset) & 0xFFFFFFFFFFFFL);

        if (length <= 0)
        {
            ReceiveBegin();
            return;
        }

        inHeader = false;

        if (length > segment.size)
        {
            ResizeBuffer(length);
            dataBuffer = segment.buffer;
            dataOffset = segment.offset;
        }
        eventArgs.SetBuffer(dataBuffer, dataOffset, length);
        dataLength = dataOffset + length;
        Receive();
    }
    else
    {
        if (inAuthenticate)
        {
            inAuthenticate = false;
            inHeader = true;

            int resultCode = dataBuffer[dataOffset + 1];

            if (resultCode != 0)
            {
                throw new AerospikeException(resultCode);
            }
            ConnectionReady();
            return;
        }
        ParseCommand();
    }
}
/// <summary>
/// Header write for write operations.
/// </summary>
private void WriteHeader(WritePolicy policy, int readAttr, int writeAttr, int fieldCount, int operationCount)
{
    // Set flags.
    int generation = 0;
    int infoAttr = 0;

    switch (policy.recordExistsAction)
    {
        case RecordExistsAction.UPDATE:
            break;
        case RecordExistsAction.UPDATE_ONLY:
            infoAttr |= Command.INFO3_UPDATE_ONLY;
            break;
        case RecordExistsAction.REPLACE:
            infoAttr |= Command.INFO3_CREATE_OR_REPLACE;
            break;
        case RecordExistsAction.REPLACE_ONLY:
            infoAttr |= Command.INFO3_REPLACE_ONLY;
            break;
        case RecordExistsAction.CREATE_ONLY:
            writeAttr |= Command.INFO2_CREATE_ONLY;
            break;
    }

    switch (policy.generationPolicy)
    {
        case GenerationPolicy.NONE:
            break;
        case GenerationPolicy.EXPECT_GEN_EQUAL:
            generation = policy.generation;
            writeAttr |= Command.INFO2_GENERATION;
            break;
        case GenerationPolicy.EXPECT_GEN_GT:
            generation = policy.generation;
            writeAttr |= Command.INFO2_GENERATION_GT;
            break;
    }

    if (policy.commitLevel == CommitLevel.COMMIT_MASTER)
    {
        infoAttr |= Command.INFO3_COMMIT_MASTER;
    }

    if (policy.linearizeRead)
    {
        infoAttr |= Command.INFO3_LINEARIZE_READ;
    }

    if (policy.consistencyLevel == ConsistencyLevel.CONSISTENCY_ALL)
    {
        readAttr |= Command.INFO1_CONSISTENCY_ALL;
    }

    if (policy.durableDelete)
    {
        writeAttr |= Command.INFO2_DURABLE_DELETE;
    }

    dataOffset += 8;

    // Write all header data except total size which must be written last.
    dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length.
    dataBuffer[dataOffset++] = (byte)readAttr;
    dataBuffer[dataOffset++] = (byte)writeAttr;
    dataBuffer[dataOffset++] = (byte)infoAttr;
    dataBuffer[dataOffset++] = 0; // unused
    dataBuffer[dataOffset++] = 0; // clear the result code
    dataOffset += ByteUtil.IntToBytes((uint)generation, dataBuffer, dataOffset);
    dataOffset += ByteUtil.IntToBytes((uint)policy.expiration, dataBuffer, dataOffset);
    dataOffset += ByteUtil.IntToBytes((uint)policy.totalTimeout, dataBuffer, dataOffset);
    dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
    dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
}
private void WriteFieldHeader(int size, int type)
{
    ByteUtil.IntToBytes((uint)size + 1, dataBuffer, dataOffset);
    dataOffset += 4;
    dataBuffer[dataOffset++] = (byte)type;
}
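// Field wire format produced above (sketch derived from this method):
//
//     [ 4-byte length = data size + 1 ][ 1-byte field type ][ data ... ]
//
// The stored length includes the type byte, hence the size + 1.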
protected internal override void ParseRow(Key key)
{
    int offset = batch.offsets[index++];
    Key keyOrig = keys[offset];

    if (Util.ByteArrayEquals(key.digest, keyOrig.digest))
    {
        if (resultCode == 0)
        {
            Record record = ParseRecord();
            listener.OnRecord(keyOrig, record);
        }
        else
        {
            listener.OnRecord(keyOrig, null);
        }
    }
    else
    {
        throw new AerospikeException.Parse("Unexpected batch key returned: " + key.ns + ',' + ByteUtil.BytesToHexString(key.digest) + ',' + index + ',' + offset);
    }
}
public void SetBatchRead(BatchPolicy policy, List<BatchRead> records, BatchNode batch)
{
    // Estimate full row size.
    int[] offsets = batch.offsets;
    int max = batch.offsetsSize;
    ushort fieldCount = policy.sendSetName ? (ushort)2 : (ushort)1;
    BatchRead prev = null;

    Begin();
    dataOffset += FIELD_HEADER_SIZE + 5;

    for (int i = 0; i < max; i++)
    {
        BatchRead record = records[offsets[i]];
        Key key = record.key;
        string[] binNames = record.binNames;

        dataOffset += key.digest.Length + 4;

        // Avoid relatively expensive full equality checks for performance reasons.
        // Use reference equality only in hope that common namespaces/bin names are set from
        // fixed variables. It's fine if equality not determined correctly because it just
        // results in more space used. The batch will still be correct.
        if (prev != null && prev.key.ns == key.ns &&
            (!policy.sendSetName || prev.key.setName == key.setName) &&
            prev.binNames == binNames && prev.readAllBins == record.readAllBins)
        {
            // Can set repeat previous namespace/bin names to save space.
            dataOffset++;
        }
        else
        {
            // Estimate full header, namespace and bin names.
            dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6;

            if (policy.sendSetName)
            {
                dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
            }

            if (binNames != null)
            {
                foreach (string binName in binNames)
                {
                    EstimateOperationSize(binName);
                }
            }
            prev = record;
        }
    }

    SizeBuffer();

    int readAttr = Command.INFO1_READ;

    if (policy.consistencyLevel == ConsistencyLevel.CONSISTENCY_ALL)
    {
        readAttr |= Command.INFO1_CONSISTENCY_ALL;
    }

    WriteHeader(policy, readAttr | Command.INFO1_BATCH, 0, 1, 0);

    int fieldSizeOffset = dataOffset;
    WriteFieldHeader(0, policy.sendSetName ? FieldType.BATCH_INDEX_WITH_SET : FieldType.BATCH_INDEX); // Need to update size at end.

    ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
    dataOffset += 4;
    dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0;
    prev = null;

    for (int i = 0; i < max; i++)
    {
        int index = offsets[i];
        ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset);
        dataOffset += 4;

        BatchRead record = records[index];
        Key key = record.key;
        string[] binNames = record.binNames;
        byte[] digest = key.digest;
        Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
        dataOffset += digest.Length;

        // Avoid relatively expensive full equality checks for performance reasons.
        // Use reference equality only in hope that common namespaces/bin names are set from
        // fixed variables. It's fine if equality not determined correctly because it just
        // results in more space used. The batch will still be correct.
        if (prev != null && prev.key.ns == key.ns &&
            (!policy.sendSetName || prev.key.setName == key.setName) &&
            prev.binNames == binNames && prev.readAllBins == record.readAllBins)
        {
            // Can set repeat previous namespace/bin names to save space.
            dataBuffer[dataOffset++] = 1; // repeat
        }
        else
        {
            // Write full header, namespace and bin names.
            dataBuffer[dataOffset++] = 0; // do not repeat

            if (binNames != null && binNames.Length != 0)
            {
                dataBuffer[dataOffset++] = (byte)readAttr;
                dataOffset += ByteUtil.ShortToBytes(fieldCount, dataBuffer, dataOffset);
                dataOffset += ByteUtil.ShortToBytes((ushort)binNames.Length, dataBuffer, dataOffset);
                WriteField(key.ns, FieldType.NAMESPACE);

                if (policy.sendSetName)
                {
                    WriteField(key.setName, FieldType.TABLE);
                }

                foreach (string binName in binNames)
                {
                    WriteOperation(binName, Operation.Type.READ);
                }
            }
            else
            {
                dataBuffer[dataOffset++] = (byte)(readAttr | (record.readAllBins ? Command.INFO1_GET_ALL : Command.INFO1_NOBINDATA));
                dataOffset += ByteUtil.ShortToBytes(fieldCount, dataBuffer, dataOffset);
                dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset);
                WriteField(key.ns, FieldType.NAMESPACE);

                if (policy.sendSetName)
                {
                    WriteField(key.setName, FieldType.TABLE);
                }
            }
            prev = record;
        }
    }

    // Write real field size.
    ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
    End();
}
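// Batch index field layout written above (sketch derived from this method; digests in
// this client are 20 bytes):
//
//     [ 4-byte row count ][ 1-byte allowInline flag ]
//     per row: [ 4-byte original index ][ 20-byte digest ][ 1-byte repeat flag ]
//     when repeat == 0: [ readAttr ][ fieldCount ][ opCount ][ fields... ][ read ops... ]
//
// The repeat flag lets consecutive rows sharing a namespace/set/bin-name list reuse the
// previous row's header, which is why only cheap reference equality is checked.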
protected internal override void ParseRow(Key key)
{
    if (opCount > 0)
    {
        throw new AerospikeException.Parse("Received bins that were not requested!");
    }

    int offset = batch.offsets[index++];
    Key keyOrig = keys[offset];

    if (Util.ByteArrayEquals(key.digest, keyOrig.digest))
    {
        listener.OnExists(keyOrig, resultCode == 0);
    }
    else
    {
        throw new AerospikeException.Parse("Unexpected batch key returned: " + key.ns + ',' + ByteUtil.BytesToHexString(key.digest) + ',' + index + ',' + offset);
    }
}
public void SetBatchRead(BatchPolicy policy, Key[] keys, BatchNode batch, string[] binNames, int readAttr)
{
    // Estimate full row size.
    int[] offsets = batch.offsets;
    int max = batch.offsetsSize;
    ushort fieldCount = policy.sendSetName ? (ushort)2 : (ushort)1;

    // Calculate size of bin names.
    int binNameSize = 0;
    int operationCount = 0;

    if (binNames != null)
    {
        foreach (string binName in binNames)
        {
            binNameSize += ByteUtil.EstimateSizeUtf8(binName) + OPERATION_HEADER_SIZE;
        }
        operationCount = binNames.Length;
    }

    // Estimate buffer size.
    Begin();
    dataOffset += FIELD_HEADER_SIZE + 5;

    Key prev = null;

    for (int i = 0; i < max; i++)
    {
        Key key = keys[offsets[i]];
        dataOffset += key.digest.Length + 4;

        // Try reference equality in hope that namespace for all keys is set from a fixed variable.
        if (prev != null && prev.ns == key.ns &&
            (!policy.sendSetName || prev.setName == key.setName))
        {
            // Can set repeat previous namespace/bin names to save space.
            dataOffset++;
        }
        else
        {
            // Estimate full header, namespace and bin names.
            dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6;

            if (policy.sendSetName)
            {
                dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
            }
            dataOffset += binNameSize;
            prev = key;
        }
    }

    SizeBuffer();

    if (policy.consistencyLevel == ConsistencyLevel.CONSISTENCY_ALL)
    {
        readAttr |= Command.INFO1_CONSISTENCY_ALL;
    }

    WriteHeader(policy, readAttr | Command.INFO1_BATCH, 0, 1, 0);

    int fieldSizeOffset = dataOffset;
    WriteFieldHeader(0, policy.sendSetName ? FieldType.BATCH_INDEX_WITH_SET : FieldType.BATCH_INDEX); // Need to update size at end.

    ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
    dataOffset += 4;
    dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0;
    prev = null;

    for (int i = 0; i < max; i++)
    {
        int index = offsets[i];
        ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset);
        dataOffset += 4;

        Key key = keys[index];
        byte[] digest = key.digest;
        Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
        dataOffset += digest.Length;

        // Try reference equality in hope that namespace for all keys is set from a fixed variable.
        if (prev != null && prev.ns == key.ns &&
            (!policy.sendSetName || prev.setName == key.setName))
        {
            // Can set repeat previous namespace/bin names to save space.
            dataBuffer[dataOffset++] = 1; // repeat
        }
        else
        {
            // Write full header, namespace and bin names.
            dataBuffer[dataOffset++] = 0; // do not repeat
            dataBuffer[dataOffset++] = (byte)readAttr;
            dataOffset += ByteUtil.ShortToBytes(fieldCount, dataBuffer, dataOffset);
            dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
            WriteField(key.ns, FieldType.NAMESPACE);

            if (policy.sendSetName)
            {
                WriteField(key.setName, FieldType.TABLE);
            }

            if (binNames != null)
            {
                foreach (string binName in binNames)
                {
                    WriteOperation(binName, Operation.Type.READ);
                }
            }
            prev = key;
        }
    }

    // Write real field size.
    ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
    End();
}
/// <summary>
/// Return key elements in string format.
/// </summary>
public override string ToString()
{
    return(this.ns + ":" + this.setName + ":" + this.userKey + ":" + ByteUtil.BytesToHexString(this.digest));
}
public void SetScan(ScanPolicy policy, string ns, string setName, string[] binNames, ulong taskId)
{
    Begin();
    int fieldCount = 0;

    if (ns != null)
    {
        dataOffset += ByteUtil.EstimateSizeUtf8(ns) + FIELD_HEADER_SIZE;
        fieldCount++;
    }

    if (setName != null)
    {
        dataOffset += ByteUtil.EstimateSizeUtf8(setName) + FIELD_HEADER_SIZE;
        fieldCount++;
    }

    // Estimate scan options size.
    dataOffset += 2 + FIELD_HEADER_SIZE;
    fieldCount++;

    // Estimate scan timeout size.
    dataOffset += 4 + FIELD_HEADER_SIZE;
    fieldCount++;

    // Estimate taskId size.
    dataOffset += 8 + FIELD_HEADER_SIZE;
    fieldCount++;

    if (binNames != null)
    {
        foreach (string binName in binNames)
        {
            EstimateOperationSize(binName);
        }
    }

    SizeBuffer();
    byte readAttr = (byte)Command.INFO1_READ;

    if (!policy.includeBinData)
    {
        readAttr |= (byte)Command.INFO1_NOBINDATA;
    }

    int operationCount = (binNames == null) ? 0 : binNames.Length;
    WriteHeader(policy, readAttr, 0, fieldCount, operationCount);

    if (ns != null)
    {
        WriteField(ns, FieldType.NAMESPACE);
    }

    if (setName != null)
    {
        WriteField(setName, FieldType.TABLE);
    }

    WriteFieldHeader(2, FieldType.SCAN_OPTIONS);

    byte priority = (byte)policy.priority;
    priority <<= 4;

    if (policy.failOnClusterChange)
    {
        priority |= 0x08;
    }

    dataBuffer[dataOffset++] = priority;
    dataBuffer[dataOffset++] = (byte)policy.scanPercent;

    // Write scan timeout.
    WriteFieldHeader(4, FieldType.SCAN_TIMEOUT);
    dataOffset += ByteUtil.IntToBytes((uint)policy.socketTimeout, dataBuffer, dataOffset);

    // Write taskId field.
    WriteFieldHeader(8, FieldType.TRAN_ID);
    dataOffset += ByteUtil.LongToBytes(taskId, dataBuffer, dataOffset);

    if (binNames != null)
    {
        foreach (string binName in binNames)
        {
            WriteOperation(binName, Operation.Type.READ);
        }
    }
    End();
}
public Cluster(ClientPolicy policy, Host[] hosts)
{
    this.clusterName = policy.clusterName;
    tlsPolicy = policy.tlsPolicy;
    this.authMode = policy.authMode;

    // Default TLS names when TLS enabled.
    if (tlsPolicy != null)
    {
        bool useClusterName = HasClusterName;

        for (int i = 0; i < hosts.Length; i++)
        {
            Host host = hosts[i];

            if (host.tlsName == null)
            {
                string tlsName = useClusterName ? clusterName : host.name;
                hosts[i] = new Host(host.name, tlsName, host.port);
            }
        }
    }
    else
    {
        if (authMode == AuthMode.EXTERNAL)
        {
            throw new AerospikeException("TLS is required for authentication mode: " + authMode);
        }
    }

    this.seeds = hosts;

    if (policy.user != null && policy.user.Length > 0)
    {
        this.user = ByteUtil.StringToUtf8(policy.user);

        // Only store clear text password if external authentication is used.
        if (authMode != AuthMode.INTERNAL)
        {
            this.password = ByteUtil.StringToUtf8(policy.password);
        }

        string pass = policy.password;

        if (pass == null)
        {
            pass = "";
        }

        if (!(pass.Length == 60 && pass.StartsWith("$2a$")))
        {
            pass = AdminCommand.HashPassword(pass);
        }
        this.passwordHash = ByteUtil.StringToUtf8(pass);
    }

    minConnsPerNode = policy.minConnsPerNode;
    maxConnsPerNode = policy.maxConnsPerNode;

    if (minConnsPerNode > maxConnsPerNode)
    {
        throw new AerospikeException("Invalid connection range: " + minConnsPerNode + " - " + maxConnsPerNode);
    }

    connPoolsPerNode = policy.connPoolsPerNode;
    connectionTimeout = policy.timeout;
    loginTimeout = policy.loginTimeout;
    maxSocketIdleMillis = 1000 * ((policy.maxSocketIdle <= MaxSocketIdleSecondLimit) ? policy.maxSocketIdle : MaxSocketIdleSecondLimit);
    tendInterval = policy.tendInterval;
    ipMap = policy.ipMap;
    useServicesAlternate = policy.useServicesAlternate;
    rackAware = policy.rackAware;
    rackId = policy.rackId;

    aliases = new Dictionary<Host, Node>();
    nodesMap = new Dictionary<string, Node>();
    nodes = new Node[0];
    partitionMap = new Dictionary<string, Partitions>();
    cancel = new CancellationTokenSource();
    cancelToken = cancel.Token;
}
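// Illustrative usage (sketch; applications reach this constructor through the public
// client entry point rather than directly):
//
//     var policy = new ClientPolicy();
//     policy.user = "admin";        // non-empty user triggers the password-hashing branch above
//     policy.password = "password1";
//     var client = new AerospikeClient(policy, new Host("127.0.0.1", 3000));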
protected internal void SetQuery(Policy policy, Statement statement, bool write)
{
    byte[] functionArgBuffer = null;
    int fieldCount = 0;
    int filterSize = 0;
    int binNameSize = 0;

    Begin();

    if (statement.ns != null)
    {
        dataOffset += ByteUtil.EstimateSizeUtf8(statement.ns) + FIELD_HEADER_SIZE;
        fieldCount++;
    }

    if (statement.indexName != null)
    {
        dataOffset += ByteUtil.EstimateSizeUtf8(statement.indexName) + FIELD_HEADER_SIZE;
        fieldCount++;
    }

    if (statement.setName != null)
    {
        dataOffset += ByteUtil.EstimateSizeUtf8(statement.setName) + FIELD_HEADER_SIZE;
        fieldCount++;
    }

    // Allocate space for TaskId field.
    dataOffset += 8 + FIELD_HEADER_SIZE;
    fieldCount++;

    if (statement.filter != null)
    {
        IndexCollectionType type = statement.filter.CollectionType;

        if (type != IndexCollectionType.DEFAULT)
        {
            dataOffset += FIELD_HEADER_SIZE + 1;
            fieldCount++;
        }

        dataOffset += FIELD_HEADER_SIZE;
        filterSize++; // num filters
        filterSize += statement.filter.EstimateSize();
        dataOffset += filterSize;
        fieldCount++;

        // Query bin names are specified as a field (Scan bin names are specified later as operations).
        if (statement.binNames != null && statement.binNames.Length > 0)
        {
            dataOffset += FIELD_HEADER_SIZE;
            binNameSize++; // num bin names

            foreach (string binName in statement.binNames)
            {
                binNameSize += ByteUtil.EstimateSizeUtf8(binName) + 1;
            }
            dataOffset += binNameSize;
            fieldCount++;
        }
    }
    else
    {
        // Calling query with no filters is more efficiently handled by a primary index scan.
        // Estimate scan options size.
        dataOffset += 2 + FIELD_HEADER_SIZE;
        fieldCount++;

        // Estimate scan timeout size.
        dataOffset += 4 + FIELD_HEADER_SIZE;
        fieldCount++;
    }

    PredExp[] predExp = statement.PredExp;
    int predSize = 0;

    if (predExp != null)
    {
        dataOffset += FIELD_HEADER_SIZE;
        predSize = PredExp.EstimateSize(predExp);
        dataOffset += predSize;
        fieldCount++;
    }

    if (statement.functionName != null)
    {
        dataOffset += FIELD_HEADER_SIZE + 1; // udf type
        dataOffset += ByteUtil.EstimateSizeUtf8(statement.packageName) + FIELD_HEADER_SIZE;
        dataOffset += ByteUtil.EstimateSizeUtf8(statement.functionName) + FIELD_HEADER_SIZE;

        if (statement.functionArgs.Length > 0)
        {
            functionArgBuffer = Packer.Pack(statement.functionArgs);
        }
        else
        {
            functionArgBuffer = new byte[0];
        }
        dataOffset += FIELD_HEADER_SIZE + functionArgBuffer.Length;
        fieldCount += 4;
    }

    if (statement.filter == null)
    {
        if (statement.binNames != null)
        {
            foreach (string binName in statement.binNames)
            {
                EstimateOperationSize(binName);
            }
        }
    }

    SizeBuffer();

    int operationCount = (statement.filter == null && statement.binNames != null) ? statement.binNames.Length : 0;

    if (write)
    {
        WriteHeader((WritePolicy)policy, Command.INFO1_READ, Command.INFO2_WRITE, fieldCount, operationCount);
    }
    else
    {
        QueryPolicy qp = (QueryPolicy)policy;
        int readAttr = qp.includeBinData ? Command.INFO1_READ : Command.INFO1_READ | Command.INFO1_NOBINDATA;
        WriteHeader(policy, readAttr, 0, fieldCount, operationCount);
    }

    if (statement.ns != null)
    {
        WriteField(statement.ns, FieldType.NAMESPACE);
    }

    if (statement.indexName != null)
    {
        WriteField(statement.indexName, FieldType.INDEX_NAME);
    }

    if (statement.setName != null)
    {
        WriteField(statement.setName, FieldType.TABLE);
    }

    // Write taskId field.
    WriteFieldHeader(8, FieldType.TRAN_ID);
    ByteUtil.LongToBytes(statement.taskId, dataBuffer, dataOffset);
    dataOffset += 8;

    if (statement.filter != null)
    {
        IndexCollectionType type = statement.filter.CollectionType;

        if (type != IndexCollectionType.DEFAULT)
        {
            WriteFieldHeader(1, FieldType.INDEX_TYPE);
            dataBuffer[dataOffset++] = (byte)type;
        }

        WriteFieldHeader(filterSize, FieldType.INDEX_RANGE);
        dataBuffer[dataOffset++] = (byte)1;
        dataOffset = statement.filter.Write(dataBuffer, dataOffset);

        // Query bin names are specified as a field (Scan bin names are specified later as operations).
        if (statement.binNames != null && statement.binNames.Length > 0)
        {
            WriteFieldHeader(binNameSize, FieldType.QUERY_BINLIST);
            dataBuffer[dataOffset++] = (byte)statement.binNames.Length;

            foreach (string binName in statement.binNames)
            {
                int len = ByteUtil.StringToUtf8(binName, dataBuffer, dataOffset + 1);
                dataBuffer[dataOffset] = (byte)len;
                dataOffset += len + 1;
            }
        }
    }
    else
    {
        // Calling query with no filters is more efficiently handled by a primary index scan.
        WriteFieldHeader(2, FieldType.SCAN_OPTIONS);
        byte priority = (byte)policy.priority;
        priority <<= 4;

        if (!write && ((QueryPolicy)policy).failOnClusterChange)
        {
            priority |= 0x08;
        }
        dataBuffer[dataOffset++] = priority;
        dataBuffer[dataOffset++] = (byte)100; // scan percent

        // Write scan timeout.
        WriteFieldHeader(4, FieldType.SCAN_TIMEOUT);
        dataOffset += ByteUtil.IntToBytes((uint)policy.socketTimeout, dataBuffer, dataOffset);
    }

    if (predExp != null)
    {
        WriteFieldHeader(predSize, FieldType.PREDEXP);
        dataOffset = PredExp.Write(predExp, dataBuffer, dataOffset);
    }

    if (statement.functionName != null)
    {
        WriteFieldHeader(1, FieldType.UDF_OP);
        dataBuffer[dataOffset++] = (statement.returnData) ? (byte)1 : (byte)2;
        WriteField(statement.packageName, FieldType.UDF_PACKAGE_NAME);
        WriteField(statement.functionName, FieldType.UDF_FUNCTION);
        WriteField(functionArgBuffer, FieldType.UDF_ARGLIST);
    }

    // Scan bin names are specified after all fields.
    if (statement.filter == null)
    {
        if (statement.binNames != null)
        {
            foreach (string binName in statement.binNames)
            {
                WriteOperation(binName, Operation.Type.READ);
            }
        }
    }
    End();
}
protected internal sealed override void ParseResult(Connection conn)
{
    // Read blocks of records. Do not use thread local receive buffer because each
    // block will likely be too big for a cache. Also, scan callbacks can nest
    // further database commands which would contend with the thread local receive buffer.
    // Instead, use separate heap allocated buffers.
    byte[] protoBuf = new byte[8];
    byte[] buf = null;
    byte[] ubuf = null;
    int receiveSize;

    while (true)
    {
        // Read header.
        conn.ReadFully(protoBuf, 8);

        long proto = ByteUtil.BytesToLong(protoBuf, 0);
        int size = (int)(proto & 0xFFFFFFFFFFFFL);

        if (size <= 0)
        {
            continue;
        }

        // Prepare buffer.
        if (buf == null || size > buf.Length)
        {
            // Corrupted data streams can result in a huge length.
            // Do a sanity check here.
            if (size > MAX_BUFFER_SIZE)
            {
                throw new AerospikeException("Invalid proto size: " + size);
            }

            int capacity = (size + 16383) & ~16383; // Round up in 16KB increments.
            buf = new byte[capacity];
        }

        // Read remaining message bytes in group.
        conn.ReadFully(buf, size);
        conn.UpdateLastUsed();

        ulong type = (ulong)((proto >> 48) & 0xff);

        if (type == Command.AS_MSG_TYPE)
        {
            dataBuffer = buf;
            dataOffset = 0;
            receiveSize = size;
        }
        else if (type == Command.MSG_TYPE_COMPRESSED)
        {
            int usize = (int)ByteUtil.BytesToLong(buf, 0);

            if (ubuf == null || usize > ubuf.Length)
            {
                if (usize > MAX_BUFFER_SIZE)
                {
                    throw new AerospikeException("Invalid proto size: " + usize);
                }

                int capacity = (usize + 16383) & ~16383; // Round up in 16KB increments.
                ubuf = new byte[capacity];
            }

            ByteUtil.Decompress(buf, 8, size, ubuf, usize);
            dataBuffer = ubuf;
            dataOffset = 8;
            receiveSize = usize;
        }
        else
        {
            throw new AerospikeException("Invalid proto type: " + type + " Expected: " + Command.AS_MSG_TYPE);
        }

        if (!ParseGroup(receiveSize))
        {
            break;
        }
    }
}
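// Proto header sketch (standalone, illustrative): the 8-byte header read above packs
// version, message type, and payload size into one big-endian long:
//
//     [ 1 byte version ][ 1 byte type ][ 6 bytes size ]
//
public static (int type, long size) ParseProtoHeaderSketch(long proto)
{
    // Mirrors the masks used in ParseResult: byte 48-55 is the type,
    // the low 48 bits are the payload size.
    return ((int)((proto >> 48) & 0xff), proto & 0xFFFFFFFFFFFFL);
}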
private void ReceiveEvent()
{
    //Log.Info("Receive Event: " + eventArgs.BytesTransferred + "," + dataOffset + "," + dataLength + "," + inHeader);

    if (usingSocketTimeout)
    {
        eventReceived = true;
    }

    if (eventArgs.BytesTransferred <= 0)
    {
        ConnectionFailed(new AerospikeException.Connection("Connection closed"));
        return;
    }

    dataOffset += eventArgs.BytesTransferred;

    if (dataOffset < dataLength)
    {
        eventArgs.SetBuffer(dataOffset, dataLength - dataOffset);
        Receive();
        return;
    }
    dataOffset = segment.offset;

    if (inHeader)
    {
        int length = (int)(ByteUtil.BytesToLong(dataBuffer, dataOffset) & 0xFFFFFFFFFFFFL);

        if (length <= 0)
        {
            ReceiveBegin();
            return;
        }

        inHeader = false;

        if (length > segment.size)
        {
            ResizeBuffer(length);
            dataBuffer = segment.buffer;
            dataOffset = segment.offset;
        }
        eventArgs.SetBuffer(dataBuffer, dataOffset, length);
        dataLength = dataOffset + length;
        Receive();
    }
    else
    {
        if (inAuthenticate)
        {
            inAuthenticate = false;
            inHeader = true;

            int resultCode = dataBuffer[dataOffset + 1];

            if (resultCode != 0)
            {
                // Authentication failed. Session token probably expired.
                // Signal tend thread to perform node login, so future
                // transactions do not fail.
                node.SignalLogin();

                // This is a rare event because the client tracks session
                // expiration and will relogin before session expiration.
                // Do not try to login on same socket because login can take
                // a long time and thousands of simultaneous logins could
                // overwhelm server.
                throw new AerospikeException(resultCode);
            }
            ConnectionReady();
            return;
        }
        ParseCommand();
    }
}