// Routes a pending write request to the right queue; must be called with the shared lock held.
// Connection-level frames (stream 0), WINDOW_UPDATE and RST_STREAM go straight onto the shared
// connection write queue; everything else is queued on its owning stream.
// Returns false when no stream with the given id exists.
private bool TryEnqueueWriteRequestLocked(uint streamId, WriteRequest wr)
{
    if (streamId == 0 || wr.Header.Type == FrameType.WindowUpdate || wr.Header.Type == FrameType.ResetStream)
    {
        shared.WriteQueue.Enqueue(wr);
        if (wr.Header.Type == FrameType.ResetStream && streamId != 0)
        {
            // A reset terminates the stream, so drop our bookkeeping for it.
            this.RemoveStreamLocked(streamId);
        }
        return(true);
    }
    for (var i = 0; i < shared.Streams.Count; i++)
    {
        var stream = shared.Streams[i];
        if (stream.StreamId == streamId)
        {
            if (wr.Header.HasEndOfStreamFlag)
            {
                stream.EndOfStreamQueued = true;
                // Write the entry back so the flag change sticks even if the
                // element is a value type (copied out by the indexer above).
                shared.Streams[i] = stream;
            }
            stream.WriteQueue.Enqueue(wr);
            return(true);
        }
    }
    return(false);
}
/// <summary>
/// Drains the outbound buffer by issuing gathered native TCP writes until the buffer
/// is empty, falling back to the per-buffer write path when no shared segments exist.
/// </summary>
void Write(ChannelOutboundBuffer input)
{
    for (int pending = input.Count; pending != 0; pending = input.Count)
    {
        var segments = input.GetSharedBufferList();
        long bytesToRemove = input.NioBufferSize;
        if (segments.Count == 0)
        {
            // No gathered segments available — hand off to the per-buffer write path.
            this.WriteByteBuffers(input);
            return;
        }
        var request = Recycler.Take();
        request.Prepare((TcpChannelUnsafe)this.Unsafe, segments);
        this.tcp.Write(request);
        input.RemoveBytes(bytesToRemove);
    }
}
/// <summary>
/// Issues an SMB2 WRITE for <paramref name="data"/> at <paramref name="offset"/> and
/// blocks until a final (non-pending) response arrives.
/// </summary>
/// <param name="numberOfBytesWritten">Set to the server-reported byte count on success; 0 otherwise.</param>
/// <returns>The response status, or STATUS_INVALID_SMB when no response arrives.</returns>
public NTStatus WriteFile(out int numberOfBytesWritten, object handle, long offset, byte[] data)
{
    numberOfBytesWritten = 0;
    WriteRequest request = new WriteRequest();
    // Credit charge scales with the payload size.
    request.Header.CreditCharge = (ushort)Math.Ceiling((double)data.Length / BytesPerCredit);
    request.FileId = (FileID)handle;
    request.Offset = (ulong)offset;
    request.Data = data;
    TrySendCommand(request);
    while (true)
    {
        SMB2Command response = m_client.WaitForCommand(SMB2CommandName.Write);
        if (response == null)
        {
            return NTStatus.STATUS_INVALID_SMB;
        }
        // STATUS_PENDING is an interim response; keep waiting for the final one.
        if (response.Header.Status == NTStatus.STATUS_PENDING)
        {
            continue;
        }
        if (response.Header.Status == NTStatus.STATUS_SUCCESS && response is WriteResponse)
        {
            numberOfBytesWritten = (int)((WriteResponse)response).Count;
        }
        return response.Header.Status;
    }
}
/// <summary>
/// Converts the DTO key/value pair from the request into domain objects and stores
/// them in the given partition, inserting or overwriting as needed.
/// </summary>
public void write_new_value_locally(Partition partition, WriteRequest request)
{
    var domainKey = Utilities.ConvertKeyDtoToDomain(request.ObjectKey);
    var domainValue = Utilities.ConvertValueDtoToDomain(request.Object);
    partition.addNewOrUpdateExisting(domainKey, domainValue);
}
/// <summary>
/// Handles a client write. Only the master of the target partition accepts the write;
/// otherwise the reply carries the master's id so the client can redirect.
/// </summary>
public override Task<WriteReply> Write(WriteRequest request, ServerCallContext context)
{
    WaitUnfreeze();
    var masterId = _gigaStorage.GetMaster(request.PartitionId);
    var serverId = _gigaStorage.ServerId;
    if (serverId != masterId)
    {
        Console.WriteLine($"This server (id: {serverId}) is not the master server for partition {request.PartitionId}.");
        return Task.FromResult(new WriteReply { MasterId = masterId });
    }
    // We are the master for this partition: apply the write and echo our own id.
    _gigaStorage.Write(request.PartitionId, request.ObjectId, request.Value);
    return Task.FromResult(new WriteReply { MasterId = serverId });
}
/// <summary>
/// Rents a <see cref="WriteRequest"/> from the pool and initializes it with the given
/// buffer, payload segments and completion callback.
/// </summary>
public static WriteRequest Get(ArraySegment<byte> buffer, ArraySegment<byte>[] payload, Action<object> callback, object state)
{
    var pooled = writeRequestPool.Take();
    pooled.Initialize(buffer, payload, callback, state);
    return pooled;
}
/// <summary>
/// Serves an SMB1 WRITE: validates the FID, enforces share-level write access, writes
/// the payload through the share's file store, and reports the byte count on success.
/// </summary>
internal static SMB1Command GetWriteResponse(SMB1Header header, WriteRequest request, ISMBShare share, SMB1ConnectionState state)
{
    SMB1Session session = state.GetSession(header.UID);
    OpenFileObject openFile = session.GetOpenFileObject(request.FID);
    if (openFile == null)
    {
        state.LogToServer(Severity.Verbose, "Write failed. Invalid FID. (UID: {0}, TID: {1}, FID: {2})", header.UID, header.TID, request.FID);
        header.Status = NTStatus.STATUS_INVALID_HANDLE;
        return new ErrorResponse(request.CommandName);
    }
    // Only file-system shares carry per-path access control.
    FileSystemShare fileSystemShare = share as FileSystemShare;
    if (fileSystemShare != null && !fileSystemShare.HasWriteAccess(session.SecurityContext, openFile.Path))
    {
        state.LogToServer(Severity.Verbose, "Write to '{0}{1}' failed. User '{2}' was denied access.", share.Name, openFile.Path, session.UserName);
        header.Status = NTStatus.STATUS_ACCESS_DENIED;
        return new ErrorResponse(request.CommandName);
    }
    int bytesWritten;
    header.Status = share.FileStore.WriteFile(out bytesWritten, openFile.Handle, request.WriteOffsetInBytes, request.Data);
    if (header.Status != NTStatus.STATUS_SUCCESS)
    {
        state.LogToServer(Severity.Verbose, "Write to '{0}{1}' failed. NTStatus: {2}. (FID: {3})", share.Name, openFile.Path, header.Status, request.FID);
        return new ErrorResponse(request.CommandName);
    }
    WriteResponse response = new WriteResponse();
    response.CountOfBytesWritten = (ushort)bytesWritten;
    return response;
}
/// <summary>
/// Reads the current Thermo datapool tag value and writes it to the corresponding
/// OPC UA node's Value attribute over the given session channel.
/// </summary>
/// <exception cref="InvalidCastException">Thrown for unsupported datapool tag types.</exception>
public async Task DatapoolReadOpcWriteAsync(OpcDatapoolModel dataModel, UaTcpSessionChannel channel)
{
    // Map the datapool tag type to a DataValue carrying the matching CLR variant.
    DataValue value;
    switch (dataModel.TagInfo.Type)
    {
        case Thermo.Datapool.Datapool.dpTypes.FLOAT:
            // NOTE(review): only the FLOAT case stamps a server timestamp — confirm this asymmetry is intended.
            value = new DataValue(new Variant(dataModel.TagInfo.AsDouble), sourceTimestamp: DateTime.Now, serverTimestamp: DateTime.Now);
            break;
        case Thermo.Datapool.Datapool.dpTypes.INT:
            value = new DataValue(new Variant(dataModel.TagInfo.AsInt), sourceTimestamp: DateTime.Now);
            break;
        case Thermo.Datapool.Datapool.dpTypes.STRING:
            value = new DataValue(new Variant(dataModel.TagInfo.AsString), sourceTimestamp: DateTime.Now);
            break;
        case Thermo.Datapool.Datapool.dpTypes.BOOL:
            value = new DataValue(new Variant(dataModel.TagInfo.AsBoolean), sourceTimestamp: DateTime.Now);
            break;
        default:
            throw new InvalidCastException($"Cannot write {dataModel.TagInfo.Type} data type to OPC.");
    }
    var writeRequest = new WriteRequest
    {
        NodesToWrite = new WriteValue[]
        {
            new WriteValue { NodeId = dataModel.NodeId, Value = value, AttributeId = AttributeIds.Value }
        }
    };
    // The response was previously captured into an unused local (compiler warning);
    // it is now discarded explicitly. TODO(review): consider checking response.Results.
    await channel.WriteAsync(writeRequest);
}
/// <summary>
/// Pushes the bound property's current value to the server as an OPC UA write
/// whenever the local property changes.
/// </summary>
/// <remarks>
/// NOTE(review): this is an <c>async void</c> override, so exceptions other than
/// <see cref="ServiceResultException"/> are unobservable; the signature cannot be
/// changed here without breaking the base-class contract.
/// </remarks>
public override async void OnPropertyChanged()
{
    var pi = this.Property;
    if (pi.CanRead)
    {
        try
        {
            var value = pi.GetValue(this.Target);
            var writeRequest = new WriteRequest
            {
                NodesToWrite = new[]
                {
                    new WriteValue { NodeId = this.NodeId, AttributeId = this.AttributeId, IndexRange = this.IndexRange, Value = new DataValue(value) }
                }
            };
            var writeResponse = await this.Target.Session.WriteAsync(writeRequest).ConfigureAwait(false);
            // A single node was written, so the first result is this write's status.
            this.OnWriteResult(writeResponse.Results[0]);
        }
        catch (ServiceResultException ex)
        {
            // NOTE(review): HResult is assumed to carry the OPC UA status code — confirm.
            this.OnWriteResult((uint)ex.HResult);
        }
    }
}
/// <summary>
/// Relays payload data to the connection identified by the request's session and
/// connection ids, XOR-encoding it before it is written to the stream.
/// </summary>
public ConnectionContextResponse Write(WriteRequest request)
{
    var response = new ConnectionContextResponse();
    var session = CheckSession(ref request, ref response);
    if (session == null)
    {
        return response;
    }
    var connection = session.CheckConnection(ref request, ref response);
    if (connection == null)
    {
        return response;
    }
    try
    {
        var payload = request.Data;
        // Obfuscate the payload with the per-connection XOR key before sending.
        Program.StaticXorEncoder(ref payload, request.Cid);
        connection.Stream.Write(payload, 0, payload.Length);
        response.Success = true;
        response.Message = string.Empty;
        connection.WriteCount += payload.Length;
    }
    catch (Exception ex)
    {
        response.Success = false;
        response.Message = Strings.SERVER_SIDE + ex.Message;
    }
    return response;
}
/// <summary>
/// Queues an asynchronous UDP send of <paramref name="buffer"/> to
/// <paramref name="remoteEndPoint"/>. If the native send cannot be started, the
/// pooled request is released and the exception is rethrown.
/// </summary>
private unsafe void QueueSend(IByteBuffer buffer, IPEndPoint remoteEndPoint, Action<Udp, Exception> completion)
{
    if (buffer is null) { ThrowHelper.ThrowArgumentNullException(ExceptionArgument.buffer); }
    if (remoteEndPoint is null) { ThrowHelper.ThrowArgumentNullException(ExceptionArgument.remoteEndPoint); }

    WriteRequest sendRequest = Loop.SendRequestPool.Take();
    try
    {
        sendRequest.Prepare(buffer, (req, error) => completion?.Invoke(this, error));
        NativeMethods.UdpSend(sendRequest.InternalHandle, InternalHandle, remoteEndPoint, sendRequest.Bufs, ref sendRequest.Size);
    }
    catch (Exception error)
    {
        // Hand the pooled request back before propagating the failure.
        sendRequest.Release();
        Log.Handle_faulted(HandleType, error);
        throw;
    }
}
/// <summary>
/// Writes a batch of values to OPC UA variable nodes in namespace <paramref name="node"/>.
/// Returns the server's write response, or <c>null</c> when the write fails.
/// </summary>
/// <param name="node">Namespace index used to build each node id ("ns=&lt;node&gt;;s=&lt;key&gt;").</param>
/// <param name="ValuesToSet">Pairs of node string identifier and value to write.</param>
public static async Task<object> WriteVar(int node, KeyValuePair<string, object>[] ValuesToSet)
{
    try
    {
        WriteValue[] valuesToWrite = new WriteValue[ValuesToSet.Length];
        for (int i = 0; i < ValuesToSet.Length; i++)
        {
            valuesToWrite[i] = new WriteValue
            {
                // The node id is parsed from "ns=<namespace>;s=<identifier>".
                NodeId = NodeId.Parse("ns=" + node.ToString() + ";s=" + ValuesToSet[i].Key),
                // Variable-class nodes expose their value through the Value attribute.
                AttributeId = AttributeIds.Value,
                Value = new DataValue(ValuesToSet[i].Value)
            };
        }
        var writeRequest = new WriteRequest { NodesToWrite = valuesToWrite };
        // Send the WriteRequest to the server (the original comment said "ReadRequest").
        var writeResult = await channel.WriteAsync(writeRequest);
        return writeResult;
    }
    catch (Exception)
    {
        // Best-effort contract preserved: any failure is reported as null.
        // The previously-declared `ex` local was never used (CS0168).
        // TODO(review): consider logging the exception instead of discarding it.
        //await channel.AbortAsync();
        return null;
    }
}
/// <summary>
/// Caches the new value locally, raises change notification, and (when connected)
/// writes it to the server, tracing any per-node or transport errors.
/// </summary>
internal async Task SetValueAsync(UaItem item, DataValue value)
{
    item.CacheValue = value;
    NotifyPropertyChanged(item.DisplayName);
    var session = Session;
    if (session == null || !session.Connected)
    {
        Trace.TraceError("Error writing value for NodeId {0} : {1}", item.ResolvedNodeId, "Session is null or not connected");
        return;
    }
    try
    {
        var writeRequest = new WriteRequest
        {
            NodesToWrite =
            {
                new WriteValue { NodeId = item.ResolvedNodeId, AttributeId = (uint)item.AttributeId, Value = value }
            }
        };
        var writeResponse = await session.WriteAsync(writeRequest).ConfigureAwait(false);
        // Report every write that came back with a non-good status code.
        for (int index = 0; index < writeResponse.Results.Count; index++)
        {
            if (StatusCode.IsNotGood(writeResponse.Results[index]))
            {
                Trace.TraceError("Error writing value for NodeId {0} : {1}", writeRequest.NodesToWrite[index].NodeId, writeResponse.Results[index]);
            }
        }
    }
    catch (Exception ex)
    {
        Trace.TraceError("Error writing value for NodeId {0} : {1}", item.ResolvedNodeId, ex.Message);
    }
}
/// <summary>
/// Serializes <paramref name="req"/> as snappy-compressed protobuf and POSTs it to the
/// Prometheus remote-write endpoint configured under "prom_write_url"/"prom_write_token".
/// No-op when the URL is not configured.
/// </summary>
private void SendMetricRequest(WriteRequest req)
{
    var url = Configuration.GetValue<string>("prom_write_url");
    var token = Configuration.GetValue<string>("prom_write_token");
    if (String.IsNullOrEmpty(url))
    {
        return;
    }
    // Remote-write payloads are protobuf messages compressed with snappy.
    byte[] compressed;
    using (var stream = new MemoryStream())
    {
        req.WriteTo(stream);
        compressed = SnappyCodec.Compress(stream.ToArray());
    }
    // Fixed: the stream, handler, client, request and response were never disposed.
    // NOTE(review): creating an HttpClient per call risks socket exhaustion under load;
    // prefer a shared instance or IHttpClientFactory.
    using (var handler = new HttpClientHandler())
    using (var httpClient = new HttpClient(handler))
    using (var requestMessage = new HttpRequestMessage(HttpMethod.Post, url))
    {
        requestMessage.Headers.Add("Authorization", $"Bearer {token}");
        requestMessage.Content = new ByteArrayContent(compressed);
        requestMessage.Content.Headers.ContentType = new MediaTypeHeaderValue("application/x-protobuf");
        requestMessage.Content.Headers.ContentEncoding.Add("snappy");
        // Synchronous by contract; block without AggregateException wrapping
        // (GetAwaiter().GetResult() instead of .Result).
        using (HttpResponseMessage response = httpClient.SendAsync(requestMessage).GetAwaiter().GetResult())
        {
        }
    }
}
// public methods
/// <summary>
/// Appends a request to the batch, recording the caller's original index so batch
/// results can later be mapped back to the source operation.
/// </summary>
public void Add(WriteRequest request, int originalIndex)
{
    var batchIndex = _requests.Count;
    _requests.Add(request);
    _indexMap = _indexMap.Add(batchIndex, originalIndex);
}
// Queues a buffered write and, when no other write is in flight, kicks off the actual
// socket write. State machine: 0 = idle, 1 = busy writing, 2 = closed/aborted.
public void WriteBuffer(ArraySegment <byte> buffer, ArraySegment <byte>[] payload, Action <object> callback, object state)
{
    WriteRequest request = WriteRequest.Get(buffer, payload, callback, state);
    bool doWrite = true;
    lock (this.syncRoot)
    {
        if (this.state == 2)
        {
            // Closed: the request is never queued and no write is started.
            doWrite = false;
        }
        else
        {
            this.AddRequest(request);
            if (this.state == 1)
            {
                // A writer is already active; it will drain the queue, including this request.
                doWrite = false;
            }
            else
            {
                // Claim the writer role before leaving the lock.
                this.state = 1;
            }
        }
    }
    if (doWrite)
    {
        this.BuildWriteBuffer();
        this.WriteBufferInternal();
    }
}
/// <summary>
/// gRPC entry point for writes: translates the wire request into dispatcher arguments
/// and acknowledges with an empty response once the write has been handled.
/// </summary>
public override async Task<WriteResponse> Write(WriteRequest request, ServerCallContext context)
{
    var arguments = ParseWriteRequest(request);
    await dispatcher.OnWriteObject(arguments);
    return new WriteResponse();
}
/// <summary>
/// Serves an SMB2 WRITE: resolves the open file, enforces share write access,
/// performs the write, and returns either a WriteResponse or an error response.
/// </summary>
internal static SMB2Command GetWriteResponse(WriteRequest request, ISMBShare share, SMB2ConnectionState state)
{
    SMB2Session session = state.GetSession(request.Header.SessionID);
    OpenFileObject openFile = session.GetOpenFileObject(request.FileId);
    if (openFile == null)
    {
        return new ErrorResponse(request.CommandName, NTStatus.STATUS_FILE_CLOSED);
    }
    // Only file-system shares carry per-path access control.
    FileSystemShare fileSystemShare = share as FileSystemShare;
    if (fileSystemShare != null && !fileSystemShare.HasWriteAccess(session.SecurityContext, openFile.Path))
    {
        state.LogToServer(Severity.Verbose, "Write to '{0}{1}' failed. User '{2}' was denied access.", share.Name, openFile.Path, session.UserName);
        return new ErrorResponse(request.CommandName, NTStatus.STATUS_ACCESS_DENIED);
    }
    int bytesWritten;
    NTStatus writeStatus = share.FileStore.WriteFile(out bytesWritten, openFile.Handle, (long)request.Offset, request.Data);
    if (writeStatus != NTStatus.STATUS_SUCCESS)
    {
        return new ErrorResponse(request.CommandName, writeStatus);
    }
    WriteResponse response = new WriteResponse();
    response.Count = (uint)bytesWritten;
    return response;
}
/// <summary>
/// Verifies that a WriteRequest preserves the filename and transfer mode it was
/// constructed with.
/// </summary>
public void CreateWriteRequest()
{
    WriteRequest command = new WriteRequest(@"C:\bla\blub.txt", TftpTransferMode.octet, null);

    // Assert.AreEqual expects (expected, actual); the original call had the arguments
    // swapped, which produces misleading failure messages.
    Assert.AreEqual(@"C:\bla\blub.txt", command.Filename);
    Assert.AreEqual(TftpTransferMode.octet, command.Mode);
}
/// <summary>
/// Deletes all songs in <c>SongsToDelete</c> from DynamoDB, batching delete requests
/// in groups of 25 (the BatchWriteItem limit).
/// </summary>
public async Task DeleteSongsFromDatabase()
{
    var batchDynamodbList = new List<List<WriteRequest>>();
    var dynamodbValues = new List<WriteRequest>();
    var batchCounter = 0;
    // Hoisted: Count() on an IEnumerable re-enumerates the sequence, so calling it
    // inside the loop made the batching O(n^2) and enumerated SongsToDelete repeatedly.
    var totalSongs = SongsToDelete.Count();
    foreach (var deleteSong in SongsToDelete)
    {
        var dbRecord = new WriteRequest
        {
            DeleteRequest = new DeleteRequest
            {
                Key = new Dictionary<string, AttributeValue>
                {
                    { "song_number", new AttributeValue { S = deleteSong.SongNumber } }
                }
            }
        };
        dynamodbValues.Add(dbRecord);
        batchCounter += 1;
        // Flush when we hit the 25-item batch limit or the final record.
        if (batchCounter % 25 == 0 || batchCounter == totalSongs)
        {
            batchDynamodbList.Add(dynamodbValues);
            dynamodbValues = new List<WriteRequest>();
        }
    }
    foreach (var dynamodbList in batchDynamodbList)
    {
        LambdaLogger.Log($"***INFO: writing to delete from database (dynamodbList): {JsonConvert.SerializeObject(dynamodbList)}");
        await DynamodbProvider.DynamoDbBatchWriteItemAsync(dynamodbList);
    }
}
/// <summary>
/// Appends the message to local storage at the next offset, then synchronously
/// replicates it to every other cluster member before acknowledging the write.
/// </summary>
public override async Task<WriteResponse> Write(WriteRequest request, ServerCallContext context)
{
    long offset;
    lock (_storage)
    {
        offset = _storage.LatestOffset;
        _storage.Add(offset, request.Message.ToByteArray());
    }
    // Replicate to every follower (everyone but ourselves), one member at a time.
    var followers = _cluster.ClusterMembers.Except(new List<string> { _appConfig.HostName });
    foreach (var follower in followers)
    {
        using var channel = GrpcChannel.ForAddress($"http://{follower}:5000");
        var followerClient = new Follower.FollowerClient(channel);
        await followerClient.ReplicateAsync(new ReplicationRequest { Offset = offset, Message = request.Message });
    }
    return new WriteResponse { Offset = offset };
}
/// <summary>
/// Issues an SMB2 WRITE asynchronously and waits for the final response.
/// Returns the response status and the number of bytes written on success.
/// </summary>
public async Task<(NTStatus status, int numberOfBytesWritten)> WriteFileAsync(object handle, long offset, byte[] data, CancellationToken cancellationToken)
{
    WriteRequest request = new WriteRequest();
    // Credit charge scales with the payload size.
    request.Header.CreditCharge = (ushort)Math.Ceiling((double)data.Length / BytesPerCredit);
    request.FileId = (FileID)handle;
    request.Offset = (ulong)offset;
    request.Data = data;
    await TrySendCommandAsync(request, cancellationToken);
    do
    {
        SMB2Command response = m_client.WaitForCommand(SMB2CommandName.Write);
        if (response == null)
        {
            return (NTStatus.STATUS_INVALID_SMB, 0);
        }
        // Fixed: STATUS_PENDING is an interim response and must not be returned to the
        // caller; keep waiting for the final response, consistent with the synchronous
        // WriteFile implementation.
        if (response.Header.Status == NTStatus.STATUS_PENDING)
        {
            continue;
        }
        int numberOfBytesWritten = 0;
        if (response.Header.Status == NTStatus.STATUS_SUCCESS && response is WriteResponse)
        {
            numberOfBytesWritten = (int)((WriteResponse)response).Count;
        }
        return (response.Header.Status, numberOfBytesWritten);
    } while (true);
}
/// <summary>
/// Builds a single BatchWriteItem request that deletes every item in
/// <paramref name="items"/> from <paramref name="tableName"/> and executes it.
/// </summary>
private static async Task<BatchWriteItemResponse> BuildDelete<Item>(Item[] items, string tableName, ItemConverter<Item> itemConverter, AmazonDynamoDBClient dynamodbClient)
{
    var writeRequests = new List<WriteRequest>();
    foreach (Item item in items)
    {
        // Each item becomes one delete request keyed by its converted attributes.
        writeRequests.Add(new WriteRequest
        {
            DeleteRequest = new DeleteRequest { Key = itemConverter.GetItemAsAttributes(item) }
        });
    }
    var batchWriteItemRequest = new BatchWriteItemRequest
    {
        RequestItems = new Dictionary<string, List<WriteRequest>> { { tableName, writeRequests } }
    };
    return await dynamodbClient.BatchWriteItemAsync(batchWriteItemRequest);
}
// protected methods
/// <summary>
/// Serializes a single insert request's document into the batch, enforcing the
/// batch's max-document-size and element-name-checking settings.
/// </summary>
/// <remarks>
/// Caches the last looked-up serializer keyed by the document's runtime type, since
/// consecutive inserts in a batch typically share the same type.
/// </remarks>
protected override void SerializeRequest(BsonBinaryWriter bsonWriter, WriteRequest request)
{
    var insertRequest = (InsertRequest)request;
    var document = insertRequest.Document;
    if (document == null)
    {
        throw new ArgumentException("Batch contains one or more null documents.");
    }
    var actualType = document.GetType();
    if (_cachedSerializerType != actualType)
    {
        _cachedSerializer = BsonSerializer.LookupSerializer(actualType);
        _cachedSerializerType = actualType;
    }
    var serializer = _cachedSerializer;
    // Default to serializing _id first so the server can locate it cheaply.
    var serializationOptions = insertRequest.SerializationOptions ?? DocumentSerializationOptions.SerializeIdFirstInstance;
    var savedCheckElementNames = bsonWriter.CheckElementNames;
    try
    {
        bsonWriter.PushMaxDocumentSize(_args.MaxDocumentSize);
        bsonWriter.CheckElementNames = _args.CheckElementNames;
        serializer.Serialize(bsonWriter, insertRequest.NominalType, document, serializationOptions);
    }
    finally
    {
        // Always restore the writer's state, even if serialization throws.
        bsonWriter.PopMaxDocumentSize();
        bsonWriter.CheckElementNames = savedCheckElementNames;
    }
}
/// <summary>
/// Serializes one request into the batch buffer and updates the running batch
/// count/length bookkeeping.
/// </summary>
private void AddRequest(BsonBinaryWriter bsonBinaryWriter, WriteRequest request)
{
    // Remember where this request starts so it can be rolled back if it overflows the batch.
    _lastRequestPosition = bsonBinaryWriter.Buffer.Position;
    SerializeRequest(bsonBinaryWriter, request);
    _batchCount++;
    _batchLength = bsonBinaryWriter.Buffer.Position - _batchStartPosition;
}
/// <summary>
/// Broadcasts a write for <paramref name="tuple"/> to every node in the current view,
/// blocks until all of them have acknowledged, then bumps the sequence number.
/// </summary>
public override void Write(Tuple tuple)
{
    var writeMessage = new WriteRequest(ClientRequestSeqNumber, EndpointURL, tuple);
    MulticastMessageWaitAll(View.Nodes, writeMessage);
    WaitMessage(writeMessage, View.Nodes);
    ClientRequestSeqNumber++;
}
/// <summary>
/// Dispatches the write to either the advanced or the basic client RPC, depending on
/// how this proxy was configured.
/// </summary>
private AsyncUnaryCall<WriteReply> ClientWriteAsync(WriteRequest request)
{
    return _isAdvanced
        ? _client.WriteAdvancedAsync(request)
        : _client.WriteAsync(request);
}
/// <summary>
/// Records the reply for a completed write and wakes every thread blocked waiting on
/// <c>writeResults</c>.
/// </summary>
public void setWriteResult(WriteRequest request, WriteReply reply)
{
    lock (writeResults)
    {
        writeResults.Add(request, reply);
        // Wake all waiters; each one re-checks whether its own request has a result.
        Monitor.PulseAll(writeResults);
    }
}
/// <summary>
/// POSTs the request's line-protocol payload to the InfluxDB write endpoint and wraps
/// the HTTP result in an API response object.
/// </summary>
public virtual async Task<IInfluxDataApiResponse> PostAsync(WriteRequest writeRequest)
{
    var content = new StringContent(writeRequest.GetLines(), Encoding.UTF8, "text/plain");
    var queryParams = RequestParamsBuilder.BuildRequestParams(writeRequest.DbName, QueryParams.Precision, writeRequest.Precision);
    var httpResult = await base.RequestAsync(HttpMethod.Post, RequestPaths.Write, queryParams, content).ConfigureAwait(false);
    return new InfluxDataApiWriteResponse(httpResult.StatusCode, httpResult.Body);
}
/// <summary>
/// Maps the gRPC write request onto the internal write-arguments DTO.
/// </summary>
private WriteObjectArguments ParseWriteRequest(WriteRequest request)
{
    var arguments = new WriteObjectArguments
    {
        PartitionId = request.PartitionId,
        ObjectId = request.ObjectId,
        ObjectValue = request.ObjectValue
    };
    return arguments;
}
// protected methods
/// <summary>
/// Serializes a single delete request as a <c>{ q, limit }</c> document, bounded by
/// the server's max document size.
/// </summary>
protected override void SerializeRequest(BsonBinaryWriter bsonWriter, WriteRequest request)
{
    var deleteRequest = (DeleteRequest)request;

    bsonWriter.PushMaxDocumentSize(MaxDocumentSize);
    bsonWriter.WriteStartDocument();
    bsonWriter.WriteName("q");
    // A null query means "match everything".
    BsonSerializer.Serialize(bsonWriter, deleteRequest.Query ?? new QueryDocument());
    bsonWriter.WriteInt32("limit", deleteRequest.Limit);
    bsonWriter.WriteEndDocument();
    bsonWriter.PopMaxDocumentSize();
}
// protected methods
/// <summary>
/// Emulates a single bulk insert against a server without write commands by running a
/// one-document insert opcode operation and converting its write-concern outcome into
/// a batch result.
/// </summary>
protected override BulkWriteBatchResult EmulateSingleRequest(MongoConnection connection, WriteRequest request, int originalIndex)
{
    var serverInstance = connection.ServerInstance;
    var insertRequest = (InsertRequest)request;
    var insertRequests = new[] { insertRequest };
    var operationArgs = new BulkInsertOperationArgs(
        _args.AssignId,
        _args.CheckElementNames,
        _args.CollectionName,
        _args.DatabaseName,
        1, // maxBatchCount
        serverInstance.MaxMessageLength, // maxBatchLength
        serverInstance.MaxDocumentSize,
        serverInstance.MaxWireDocumentSize,
        true, // isOrdered
        _args.ReaderSettings,
        insertRequests,
        _args.WriteConcern,
        _args.WriterSettings);
    var operation = new InsertOpcodeOperation(operationArgs);

    WriteConcernResult writeConcernResult = null;
    WriteConcernException writeConcernException = null;
    try
    {
        var operationResult = operation.Execute(connection);
        if (operationResult != null)
        {
            // A single insert yields at most one write-concern result.
            writeConcernResult = operationResult.First();
        }
    }
    catch (WriteConcernException ex)
    {
        // Capture both the partial result and the exception so the batch result can
        // report the failure against the correct original index.
        writeConcernResult = ex.WriteConcernResult;
        writeConcernException = ex;
    }

    var indexMap = new IndexMap.RangeBased(0, originalIndex, 1);
    return BulkWriteBatchResult.Create(
        insertRequest,
        writeConcernResult,
        writeConcernException,
        indexMap);
}
// protected methods
/// <summary>
/// Serializes a single update request as a <c>{ q, u, [multi], [upsert] }</c> document,
/// bounded by the server's max wire document size.
/// </summary>
protected override void SerializeRequest(BsonBinaryWriter bsonWriter, WriteRequest request)
{
    var updateRequest = (UpdateRequest)request;

    bsonWriter.PushMaxDocumentSize(MaxWireDocumentSize);
    bsonWriter.WriteStartDocument();
    bsonWriter.WriteName("q");
    // A null query means "match everything".
    BsonSerializer.Serialize(bsonWriter, updateRequest.Query ?? new QueryDocument());
    bsonWriter.WriteName("u");
    BsonSerializer.Serialize(bsonWriter, updateRequest.Update);
    // multi/upsert are only emitted when explicitly set, preserving server defaults.
    if (updateRequest.IsMultiUpdate.HasValue)
    {
        bsonWriter.WriteBoolean("multi", updateRequest.IsMultiUpdate.Value);
    }
    if (updateRequest.IsUpsert.HasValue)
    {
        bsonWriter.WriteBoolean("upsert", updateRequest.IsUpsert.Value);
    }
    bsonWriter.WriteEndDocument();
    bsonWriter.PopMaxDocumentSize();
}
// protected methods
/// <summary>
/// Emulates a single bulk delete against a server without write commands by running a
/// one-request delete opcode operation and converting its write-concern outcome into
/// a batch result.
/// </summary>
protected override BulkWriteBatchResult EmulateSingleRequest(MongoConnection connection, WriteRequest request, int originalIndex)
{
    var serverInstance = connection.ServerInstance;
    var deleteRequest = (DeleteRequest)request;
    var deleteRequests = new[] { deleteRequest };
    var operationArgs = new BulkDeleteOperationArgs(
        _args.CollectionName,
        _args.DatabaseName,
        1, // maxBatchCount
        serverInstance.MaxMessageLength, // maxBatchLength
        true, // isOrdered
        _args.ReaderSettings,
        deleteRequests,
        _args.WriteConcern,
        _args.WriterSettings);
    var operation = new DeleteOpcodeOperation(operationArgs);

    WriteConcernResult writeConcernResult;
    WriteConcernException writeConcernException = null;
    try
    {
        writeConcernResult = operation.Execute(connection);
    }
    catch (WriteConcernException ex)
    {
        // Capture both the partial result and the exception so the batch result can
        // report the failure against the correct original index.
        writeConcernResult = ex.WriteConcernResult;
        writeConcernException = ex;
    }

    var indexMap = new IndexMap.RangeBased(0, originalIndex, 1);
    return BulkWriteBatchResult.Create(
        deleteRequest,
        writeConcernResult,
        writeConcernException,
        indexMap);
}
// protected methods
/// <summary>
/// Serializes one write request into the batch being built; implemented per operation
/// type (insert/update/delete).
/// </summary>
protected abstract void SerializeRequest(BsonBinaryWriter bsonWriter, WriteRequest request);
/// <summary>
/// Initializes the message with the body.
/// </summary>
/// <param name="WriteRequest">The write request carried as the message body.</param>
/// <remarks>
/// NOTE(review): the parameter name shadows its type name (PascalCase); renaming it
/// would break callers that use named arguments, so it is left as-is.
/// </remarks>
public WriteMessage(WriteRequest WriteRequest)
{
    this.WriteRequest = WriteRequest;
}
/// <summary>
/// Appends a request to the tail of the intrusive singly-linked pending-write list.
/// </summary>
void AddRequest(WriteRequest request)
{
    if (this.lastRequest != null)
    {
        // Non-empty list: link the new request after the current tail.
        this.lastRequest.Next = request;
        this.lastRequest = request;
    }
    else
    {
        // Empty list: the new request is both head and tail.
        this.firstRequest = this.lastRequest = request;
    }
}
// This function should be called in busy state (state == 1)
// Copies as many queued write requests as fit into the contiguous write buffer, arms
// the async socket-send args with that data, and unlinks the fully-copied requests
// from the pending list (they are tracked via UserToken so they can be completed when
// the send finishes).
void BuildWriteBuffer()
{
    Fx.Assert(this.state == 1, "Should be busy at this time");
    Fx.Assert(this.writeBuffer.Length == 0, "Cannot have payload in the write buffer at this point");
    Fx.Assert(this.firstRequest != null && this.lastRequest != null, "Must have buffer to write at this time");
    // Fixed: removed the unused local `bufferSize` (was assigned from
    // this.writeBuffer.Capacity and never read).

    // take a snapshot so we don't have to lock
    WriteRequest first = this.firstRequest;
    WriteRequest last = this.lastRequest;
    WriteRequest request = first;
    WriteRequest lastCompleted = null;
    while (true)
    {
        // WriteTo returns false when the request's payload does not fully fit.
        if (!request.WriteTo(this.writeBuffer))
        {
            break;
        }
        lastCompleted = request;
        request = request.Next;
        // Stop at the snapshot's tail even if new requests were appended meanwhile.
        if (lastCompleted == last)
        {
            break;
        }
    }
    this.writeAsyncEventArgs.SetBuffer(this.writeBuffer.Buffer, this.writeBuffer.Offset, this.writeBuffer.Length);
    if (lastCompleted != null)
    {
        // Remember the chain of requests covered by this send for completion later.
        this.writeAsyncEventArgs.UserToken = first;
        lock (this.syncRoot)
        {
            // Detach the copied prefix from the shared pending list.
            this.firstRequest = lastCompleted.Next;
            lastCompleted.Next = null;
            if (this.firstRequest == null)
            {
                this.lastRequest = null;
            }
        }
    }
}
/// <summary>
/// Completes every request in the linked chain starting at <paramref name="request"/>,
/// unlinking each node and returning it to the pool.
/// </summary>
public static void Complete(WriteRequest request)
{
    for (WriteRequest current = request; current != null; )
    {
        current.Complete();
        WriteRequest following = current.Next;
        // Break the link before pooling so the pooled object holds no stale references.
        current.Next = null;
        writeRequestPool.Return(current);
        current = following;
    }
}
/// <summary>
/// Invokes the Write service.
/// </summary>
/// <param name="requestHeader">Optional request header; a default one is created when null.</param>
/// <param name="nodesToWrite">The attribute values to write.</param>
/// <param name="results">One status code per write value.</param>
/// <param name="diagnosticInfos">Diagnostics matching <paramref name="results"/>.</param>
/// <returns>The response header of the Write service call.</returns>
public virtual ResponseHeader Write(
    RequestHeader requestHeader,
    WriteValueCollection nodesToWrite,
    out StatusCodeCollection results,
    out DiagnosticInfoCollection diagnosticInfos)
{
    WriteRequest request = new WriteRequest();
    WriteResponse response = null;

    request.RequestHeader = requestHeader;
    request.NodesToWrite = nodesToWrite;

    UpdateRequestHeader(request, requestHeader == null, "Write");

    try
    {
        if (UseTransportChannel)
        {
            // Binary/transport-channel path: send the request object directly.
            IServiceResponse genericResponse = TransportChannel.SendRequest(request);

            if (genericResponse == null)
            {
                throw new ServiceResultException(StatusCodes.BadUnknownResponse);
            }

            ValidateResponse(genericResponse.ResponseHeader);
            response = (WriteResponse)genericResponse;
        }
        else
        {
            // Inner-channel path: the request is wrapped in a message envelope.
            WriteResponseMessage responseMessage = InnerChannel.Write(new WriteMessage(request));

            if (responseMessage == null || responseMessage.WriteResponse == null)
            {
                throw new ServiceResultException(StatusCodes.BadUnknownResponse);
            }

            response = responseMessage.WriteResponse;
            ValidateResponse(response.ResponseHeader);
        }

        results = response.Results;
        diagnosticInfos = response.DiagnosticInfos;
    }
    finally
    {
        // Always balance UpdateRequestHeader, even when the call fails.
        RequestCompleted(request, response, "Write");
    }

    return response.ResponseHeader;
}
// protected methods
/// <summary>
/// Executes a single write request outside the bulk API (used to emulate bulk writes
/// against servers that lack write commands) and returns its batch result.
/// </summary>
protected abstract BulkWriteBatchResult EmulateSingleRequest(MongoConnection connection, WriteRequest request, int originalIndex);
// Coroutine-style WAV-to-FLAC encoder. Each `yield return request` hands the shared
// WriteRequest back to the caller, which fills it with the next chunk of input; when
// control resumes, FillBuffer copies the request's data into `buffer` and returns
// true while the segment still needs more bytes. Parses the RIFF header, the `fmt `
// block (building FLAC stream info) and the `data` block (transforming PCM into FLAC
// frames); all other RIFF blocks are read and skipped.
// NOTE(review): `compressionLevel` and `totalStreamLength` are currently unused here —
// confirm whether the level is applied elsewhere (e.g. in writer configuration).
private IEnumerator<WriteRequest> WriteFlac(FlacWriter writer, int compressionLevel)
{
    try
    {
        const int RiffHeaderLength = 12;
        const int RiffBlockHeaderLength = 8;
        const int MinFormatLength = 16;
        const int MaxFormatLength = 128;
        WriteRequest request = new WriteRequest();
        byte[] waveHeader = new byte[RiffHeaderLength];
        ArraySegment<byte> buffer = new ArraySegment<byte>(waveHeader);
        // First handshake: yield before filling so the caller can supply initial data.
        do
        {
            yield return request;
            if (!request.IsDataPresent)
                throw new FlacException("RIFF header is expected");
        } while (FillBuffer(ref buffer, request));
        if (waveHeader[0] != 'R' || waveHeader[1] != 'I' || waveHeader[2] != 'F' || waveHeader[3] != 'F'
            || waveHeader[8] != 'W' || waveHeader[9] != 'A' || waveHeader[10] != 'V' || waveHeader[11] != 'E')
            throw new FlacException("RIFF and WAVE header are expected");
        long totalStreamLength = BitConverter.ToUInt32(waveHeader, 4);
        // Process RIFF blocks until the data block has been encoded or input runs out.
        do
        {
            byte[] blockHeader = new byte[RiffBlockHeaderLength];
            buffer = new ArraySegment<byte>(blockHeader);
            while (FillBuffer(ref buffer, request))
            {
                yield return request;
                if (!request.IsDataPresent)
                    throw new FlacException("RIFF block expected");
            }
            if (blockHeader[0] == 'f' && blockHeader[1] == 'm' && blockHeader[2] == 't' && blockHeader[3] == ' ')
            {
                // "fmt " block: read the WAVE format and derive the FLAC stream info.
                int formatBlockSize = BitConverter.ToInt32(blockHeader, 4);
                if (formatBlockSize < MinFormatLength || formatBlockSize > MaxFormatLength)
                    throw new FlacException("Invalid format block size");
                byte[] formatBlock = new byte[formatBlockSize];
                buffer = new ArraySegment<byte>(formatBlock);
                while (FillBuffer(ref buffer, request))
                {
                    yield return request;
                    if (!request.IsDataPresent)
                        throw new FlacException("format block expected");
                }
                // Format tag 1 = uncompressed PCM; everything else is unsupported.
                if(BitConverter.ToUInt16(formatBlock, 0) != 1)
                    throw new FlacException("Unsupported alignment in WAVE");
                FlacStreaminfo streaminfo = new FlacStreaminfo();
                streaminfo.ChannelsCount = BitConverter.ToUInt16(formatBlock, 2);
                streaminfo.SampleRate = BitConverter.ToInt32(formatBlock, 4);
                streaminfo.BitsPerSample = BitConverter.ToUInt16(formatBlock, 14);
                streaminfo.MinBlockSize = FlacCommons.DefaultBlockSize;
                streaminfo.MaxBlockSize = FlacCommons.DefaultBlockSize;
                EstimateMinAndMaxFrameSize(streaminfo);
                this.streaminfo = streaminfo;
            }
            else if (blockHeader[0] == 'd' && blockHeader[1] == 'a' && blockHeader[2] == 't' && blockHeader[3] == 'a')
            {
                // "data" block: stream PCM through the sample transform into the writer.
                uint dataBlockSize = BitConverter.ToUInt32(blockHeader, 4);
                if (streaminfo == null)
                    throw new FlacException("Format block was not found");
                int bytesPerInterChannelSample = (streaminfo.ChannelsCount * streaminfo.BitsPerSample) >> 3;
                long totalSamples = dataBlockSize / bytesPerInterChannelSample;
                streaminfo.TotalSampleCount = totalSamples;
                sampleTransform = WaveSampleTransformerFactory.CreateWaveSampleTransformer(streaminfo.BitsPerSample);
                try
                {
                    writer.StartStream(streaminfo);
                    int samplesInBuffer = streaminfo.MaxBlockSize;
                    pcmBuffer = new byte[bytesPerInterChannelSample * samplesInBuffer];
                    long currentSample = 0;
                    int[] samples = new int[streaminfo.ChannelsCount * samplesInBuffer];
                    // Encode full blocks of MaxBlockSize samples.
                    while (currentSample + samplesInBuffer <= totalSamples)
                    {
                        buffer = new ArraySegment<byte>(pcmBuffer);
                        while (FillBuffer(ref buffer, request))
                        {
                            yield return request;
                            if (!request.IsDataPresent)
                                throw new FlacException("data block expected");
                        }
                        sampleTransform.UnpackData(pcmBuffer, samples);
                        writer.WriteSamples(samples);
                        currentSample += samplesInBuffer;
                    }
                    // Encode the final partial block, if any.
                    if (currentSample < totalSamples)
                    {
                        int samplesLeft = (int)(totalSamples - currentSample);
                        buffer = new ArraySegment<byte>(pcmBuffer, 0, bytesPerInterChannelSample * samplesLeft);
                        while (FillBuffer(ref buffer, request))
                        {
                            yield return request;
                            if (!request.IsDataPresent)
                                throw new FlacException("data block expected");
                        }
                        samples = new int[streaminfo.ChannelsCount * samplesLeft];
                        sampleTransform.UnpackData(pcmBuffer, samples);
                        writer.WriteSamples(samples);
                    }
                }
                finally
                {
                    writer.EndStream();
                }
                break;
            }
            else // otherwise skip
            {
                // Unknown RIFF block: consume and discard its payload.
                uint dataBlockSize = BitConverter.ToUInt32(blockHeader, 4);
                byte[] extraData = new byte[(int)dataBlockSize];
                buffer = new ArraySegment<byte>(extraData);
                while (FillBuffer(ref buffer, request))
                {
                    yield return request;
                    if (!request.IsDataPresent)
                        throw new FlacException("extra data is expected");
                }
            }
        } while (request.IsDataPresent);
    }
    finally
    {
        writer.Close();
    }
}
/// <summary>
/// Writes the item values to servers.
/// </summary>
/// <param name="requests">The requests.</param>
/// <remarks>
/// DA 2.0 path: creates a temporary group, adds an item per writable request, performs
/// a synchronous write, maps the per-item errors back onto the requests, then tears the
/// group down. Status-code and timestamp writes are rejected as unsupported up front.
/// NOTE(review): the local is named <c>m_group</c> (field-style prefix) — presumably a
/// leftover from a commented-out member field; confirm before renaming.
/// </remarks>
private void Da20WriteItemValues(List<WriteRequest> requests)
{
    //lock (m_groupLock)
    {
        //if (m_group == null)
        //{
        ComDaGroup m_group = new ComDaGroup(this, false);
        //}
        try
        {
            int count1 = 0;
            GroupItem[] items = new GroupItem[requests.Count];
            WriteRequest[] addItemRequests = new WriteRequest[requests.Count];
            object[] convertedValues = new object[requests.Count];

            // create the items in the temporary group.
            for (int ii = 0; ii < requests.Count; ii++)
            {
                WriteRequest request = requests[ii];

                if (request == null)
                {
                    continue;
                }

                // status code writes not supported.
                if (request.Value.StatusCode != StatusCodes.Good)
                {
                    request.Error = ResultIds.E_NOTSUPPORTED;
                    continue;
                }

                // timestamp writes not supported.
                if (request.Value.ServerTimestamp != DateTime.MinValue)
                {
                    request.Error = ResultIds.E_NOTSUPPORTED;
                    continue;
                }

                // timestamp writes not supported.
                if (request.Value.SourceTimestamp != DateTime.MinValue)
                {
                    request.Error = ResultIds.E_NOTSUPPORTED;
                    continue;
                }

                // convert to a DA compatible type.
                object convertedValue = null;
                request.Error = ComDaClientNodeManager.LocalToRemoteValue(request.Value.WrappedValue, out convertedValue);

                if (request.Error < 0)
                {
                    continue;
                }

                // add the item.
                items[count1] = m_group.CreateItem(request.ItemId, 0, 0, true);
                addItemRequests[count1] = request;
                convertedValues[count1] = convertedValue;
                count1++;
            }

            // create the items on the server.
            m_group.ApplyChanges();

            // build the list of values to write.
            int count2 = 0;
            int[] serverHandles = new int[count1];
            object[] values = new object[count1];
            WriteRequest[] writeRequests = new WriteRequest[count1];

            for (int ii = 0; ii < count1; ii++)
            {
                // check for error on create.
                GroupItem item = items[ii];
                WriteRequest request = addItemRequests[ii];

                if (item.ErrorId < 0)
                {
                    request.Error = item.ErrorId;
                    continue;
                }

                serverHandles[count2] = item.ServerHandle;
                values[count2] = convertedValues[ii];
                writeRequests[count2] = request;
                count2++;
            }

            if (count2 > 0)
            {
                // write values to the server.
                int[] errors = m_group.SyncWrite(serverHandles, values, count2);

                // read the errors.
                for (int ii = 0; ii < count2; ii++)
                {
                    if (errors != null && errors.Length > ii)
                    {
                        writeRequests[ii].Error = errors[ii];
                    }
                    else
                    {
                        // Missing error entry is treated as a hard failure.
                        writeRequests[ii].Error = ResultIds.E_FAIL;
                    }
                }

                // delete the items.
                for (int ii = 0; ii < count1; ii++)
                {
                    GroupItem item = items[ii];

                    if (item.ErrorId >= 0)
                    {
                        m_group.RemoveItem(item);
                    }
                }

                m_group.ApplyChanges();
            }
        }
        finally
        {
            // delete the group and items.
            m_group.Delete();
        }
    }
}
/// <summary>
/// Writes the values using the DA3 interfaces (IOPCItemIO.WriteVQT), which
/// accepts value, quality and timestamp in a single call per item.
/// </summary>
/// <param name="requests">The requests to write; null or unconvertible entries are skipped.</param>
private void Da30WriteItemValues(List<WriteRequest> requests)
{
    // pack the writable requests into parallel arrays sized for the worst
    // case; 'count' tracks how many slots are actually used.
    int count = 0;
    string[] itemIDs = new string[requests.Count];
    OpcRcw.Da.OPCITEMVQT[] values = new OpcRcw.Da.OPCITEMVQT[requests.Count];
    WriteRequest[] writeRequests = new WriteRequest[requests.Count];

    for (int ii = 0; ii < requests.Count; ii++)
    {
        WriteRequest request = requests[ii];

        if (request == null)
        {
            continue;
        }

        // convert to a DA compatible type.
        object convertedValue = null;
        request.Error = ComDaClientNodeManager.LocalToRemoteValue(request.Value.WrappedValue, out convertedValue);

        if (request.Error < 0)
        {
            // conversion failed; the error is already recorded on the request.
            continue;
        }

        itemIDs[count] = request.ItemId;
        values[count].vDataValue = convertedValue;
        values[count].bQualitySpecified = 0;
        values[count].bTimeStampSpecified = 0;

        // check for quality; only flag it as specified when it is not GOOD.
        values[count].wQuality = ComUtils.GetQualityCode(request.Value.StatusCode);

        if (values[count].wQuality != Qualities.OPC_QUALITY_GOOD)
        {
            values[count].bQualitySpecified = 1;
        }

        // check for server timestamp.
        if (request.Value.ServerTimestamp != DateTime.MinValue)
        {
            values[count].ftTimeStamp = ComUtils.GetFILETIME(request.Value.ServerTimestamp);
            values[count].bTimeStampSpecified = 1;
        }

        // ignore server timestamp if source timestamp is provided.
        if (request.Value.SourceTimestamp != DateTime.MinValue)
        {
            values[count].ftTimeStamp = ComUtils.GetFILETIME(request.Value.SourceTimestamp);
            values[count].bTimeStampSpecified = 1;
        }

        writeRequests[count] = request;
        count++;
    }

    IntPtr ppErrors = IntPtr.Zero;
    string methodName = "IOPCItemIO.WriteVQT";

    try
    {
        IOPCItemIO server = BeginComCall<IOPCItemIO>(methodName, true);

        server.WriteVQT(
            count,
            itemIDs,
            values,
            out ppErrors);
    }
    catch (Exception e)
    {
        // NOTE(review): on a COM failure the packed requests are returned
        // with whatever Error value they already had — confirm callers treat
        // that as "no result" rather than success.
        ComUtils.TraceComError(e, methodName);
        return;
    }
    finally
    {
        EndComCall(methodName);
    }

    // unmarshal the per-item HRESULTs (the trailing 'true' presumably frees
    // the COM-allocated ppErrors buffer) and record them on the requests.
    int[] errors = ComUtils.GetInt32s(ref ppErrors, count, true);

    for (int ii = 0; ii < count; ii++)
    {
        writeRequests[ii].Error = errors[ii];
    }
}
/// <summary> /// Writes the item values to servers. /// </summary> /// <param name="requests">The requests.</param> private void WriteItemValues(List<WriteRequest> requests) { ComDaGroup group = new ComDaGroup(this, false); try { int count1 = 0; GroupItem[] items = new GroupItem[requests.Count]; WriteRequest[] addItemRequests = new WriteRequest[requests.Count]; // create the items in the temporary group. for (int ii = 0; ii < requests.Count; ii++) { WriteRequest request = requests[ii]; if (request == null) { continue; } // status code writes not supported. if (request.Value.StatusCode != StatusCodes.Good) { request.Error = ResultIds.E_NOTSUPPORTED; continue; } // timestamp writes not supported. if (request.Value.SourceTimestamp != DateTime.MinValue) { request.Error = ResultIds.E_NOTSUPPORTED; continue; } // add the item. items[count1] = group.CreateItem(request.ItemId, 0, 0, true); addItemRequests[count1] = request; count1++; } // create the items on the server. group.ApplyChanges(); // build the list of values to write. int count2 = 0; int[] serverHandles = new int[count1]; object[] values = new object[count1]; WriteRequest[] writeRequests = new WriteRequest[count1]; for (int ii = 0; ii < count1; ii++) { // check for error on create. GroupItem item = items[ii]; WriteRequest request = addItemRequests[ii]; if (item.ErrorId < 0) { request.Error = item.ErrorId; continue; } serverHandles[count2] = item.ServerHandle; values[count2] = ComUtils.GetVARIANT(request.Value.Value); writeRequests[count2] = request; count2++; } // write values to the server. int[] errors = group.SyncWrite(serverHandles, values, count2); // read the errors. for (int ii = 0; ii < count2; ii++) { writeRequests[ii].Error = errors[ii]; } } finally { // delete the group and items. group.Delete(); } }
private bool FillBuffer(ref ArraySegment<byte> buffer, WriteRequest currentRequest) { if (buffer.Count > currentRequest.Count) { Array.Copy(currentRequest.Data, currentRequest.Offset, buffer.Array, buffer.Offset, currentRequest.Count); buffer = new ArraySegment<byte>(buffer.Array, buffer.Offset + currentRequest.Count, buffer.Count - currentRequest.Count); currentRequest.Data = null; currentRequest.Count = 0; return true; } else { Array.Copy(currentRequest.Data, currentRequest.Offset, buffer.Array, buffer.Offset, buffer.Count); currentRequest.Offset += buffer.Count; currentRequest.Count -= buffer.Count; buffer = new ArraySegment<byte>(buffer.Array, buffer.Offset + buffer.Count, 0); return false; } }
// protected methods protected override void SerializeRequest(BsonBinaryWriter bsonWriter, WriteRequest request) { var insertRequest = (InsertRequest)request; var document = insertRequest.Document; if (document == null) { throw new ArgumentException("Batch contains one or more null documents."); } var actualType = document.GetType(); IBsonSerializer serializer; if (actualType == insertRequest.NominalType && insertRequest.Serializer != null) { serializer = insertRequest.Serializer; } else { if (_cachedSerializerType != actualType) { _cachedSerializer = BsonSerializer.LookupSerializer(actualType); _cachedSerializerType = actualType; } serializer = _cachedSerializer; } var serializationOptions = insertRequest.SerializationOptions ?? DocumentSerializationOptions.SerializeIdFirstInstance; var savedCheckElementNames = bsonWriter.CheckElementNames; try { bsonWriter.PushMaxDocumentSize(MaxDocumentSize); bsonWriter.CheckElementNames = _checkElementNames; serializer.Serialize(bsonWriter, insertRequest.NominalType, document, serializationOptions); } finally { bsonWriter.PopMaxDocumentSize(); bsonWriter.CheckElementNames = savedCheckElementNames; } }
/// <summary> /// Begins an asynchronous invocation of the Write service. /// </summary> public IAsyncResult BeginWrite( RequestHeader requestHeader, WriteValueCollection nodesToWrite, AsyncCallback callback, object asyncState) { WriteRequest request = new WriteRequest(); request.RequestHeader = requestHeader; request.NodesToWrite = nodesToWrite; UpdateRequestHeader(request, requestHeader == null, "Write"); if (UseTransportChannel) { return TransportChannel.BeginSendRequest(request, callback, asyncState); } return InnerChannel.BeginWrite(new WriteMessage(request), callback, asyncState); }