internal async Task Ensure(int count, bool async, bool dontBreakOnTimeouts = false)
{
    Debug.Assert(count <= Size);
    count -= ReadBytesLeft;
    if (count <= 0)
        return;

    if (ReadPosition == _filledBytes)
        Clear();
    else if (count > Size - _filledBytes)
    {
        // Not enough free space at the end of the buffer: compact by moving the unread
        // bytes to the beginning before filling.
        Array.Copy(Buffer, ReadPosition, Buffer, 0, ReadBytesLeft);
        _filledBytes = ReadBytesLeft;
        ReadPosition = 0;
    }

    try
    {
        while (count > 0)
        {
            var toRead = Size - _filledBytes;
            var read = async
                ? await Underlying.ReadAsync(Buffer, _filledBytes, toRead)
                : Underlying.Read(Buffer, _filledBytes, toRead);
            if (read == 0)
                throw new EndOfStreamException();
            count -= read;
            _filledBytes += read;
        }
    }
    // We have a special case when reading async notifications - a timeout may be normal
    // and shouldn't be fatal.
    // Note that mono throws SocketException with the wrong error (see #1330)
    catch (IOException e) when (
        dontBreakOnTimeouts &&
        (e.InnerException as SocketException)?.SocketErrorCode ==
            (Type.GetType("Mono.Runtime") == null ? SocketError.TimedOut : SocketError.WouldBlock)
    )
    {
        throw new TimeoutException("Timeout while reading from stream");
    }
    catch (Exception e)
    {
        Connector.Break();
        throw new NpgsqlException("Exception while reading from stream", e);
    }
}
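// The compaction step above (shift the unread tail to the front, then refill until the request
// is satisfied) is the core of the read buffer. Below is a minimal standalone sketch of the same
// pattern over a plain Stream, with hypothetical names (_buffer, _readPos, _filled) and none of the
// timeout translation or connector-breaking - a simplified model, not the Npgsql implementation.
// Assumes System, System.IO and System.Threading.Tasks.
class SimpleReadBuffer
{
    readonly Stream _stream;
    readonly byte[] _buffer;
    int _readPos, _filled;

    public SimpleReadBuffer(Stream stream, int size) { _stream = stream; _buffer = new byte[size]; }

    int BytesLeft => _filled - _readPos;

    public async Task Ensure(int count)
    {
        if (count <= BytesLeft)
            return;                                         // Already buffered
        if (_readPos == _filled)
            _readPos = _filled = 0;                         // Fully consumed: just reset
        else if (count - BytesLeft > _buffer.Length - _filled)
        {
            // Not enough room at the end: move the unread tail to the front
            Array.Copy(_buffer, _readPos, _buffer, 0, BytesLeft);
            _filled = BytesLeft;
            _readPos = 0;
        }
        while (BytesLeft < count)
        {
            var read = await _stream.ReadAsync(_buffer, _filled, _buffer.Length - _filled);
            if (read == 0)
                throw new EndOfStreamException();
            _filled += read;
        }
    }
}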
public override int Read(byte[] buffer, int offset, int count)
{
    CheckDisposed();
    if (!CanRead)
        throw new InvalidOperationException("Stream not open for reading");

    var totalRead = 0;
    do
    {
        if (_leftToReadInDataMsg == 0)
        {
            if (_isConsumed)
                return 0;

            var msg = _connector.ReadSingleMessage();
            switch (msg.Code)
            {
                case BackendMessageCode.CopyData:
                    _leftToReadInDataMsg = ((CopyDataMessage)msg).Length;
                    break;
                case BackendMessageCode.CopyDone:
                    _connector.ReadExpecting<CommandCompleteMessage>();
                    _connector.ReadExpecting<ReadyForQueryMessage>();
                    _isConsumed = true;
                    goto done;
                default:
                    _connector.Break();
                    throw new Exception("Received unexpected message type in COPY OUT: " + msg.Code);
            }
        }

        var len = Math.Min(count, _leftToReadInDataMsg);
        _buf.ReadBytesSimple(buffer, offset, len);
        offset += len;
        count -= len;
        _leftToReadInDataMsg -= len;
        totalRead += len;
    } while (count > 0);

done:
    return totalRead;
}
internal void Ensure(int count, bool dontBreakOnTimeouts = false)
{
    Contract.Requires(count <= Size);
    count -= ReadBytesLeft;
    if (count <= 0)
        return;

    if (ReadPosition == _filledBytes)
        Clear();
    else if (count > Size - _filledBytes)
    {
        Array.Copy(_buf, ReadPosition, _buf, 0, ReadBytesLeft);
        _filledBytes = ReadBytesLeft;
        ReadPosition = 0;
    }

    try
    {
        while (count > 0)
        {
            var toRead = Size - _filledBytes;
            var read = Underlying.Read(_buf, _filledBytes, toRead);
            if (read == 0)
                throw new EndOfStreamException();
            count -= read;
            _filledBytes += read;
        }
    }
    // We have a special case when reading async notifications - a timeout may be normal
    // and shouldn't be fatal.
    catch (IOException e) when (
        dontBreakOnTimeouts &&
        (e.InnerException as SocketException)?.SocketErrorCode == SocketError.TimedOut
    )
    {
        throw new TimeoutException("Timeout while reading from stream");
    }
    catch (Exception e)
    {
        Connector.Break();
        throw new NpgsqlException("Exception while reading from stream", e);
    }
}
internal NpgsqlBinaryExporter(NpgsqlConnector connector, string copyToCommand)
{
    _connector = connector;
    _buf = connector.ReadBuffer;
    _registry = connector.TypeHandlerRegistry;
    _columnLen = int.MinValue;   // Mark that the (first) column length hasn't been read yet
    _column = -1;

    try
    {
        _connector.SendQuery(copyToCommand);

        // TODO: Failure will break the connection (e.g. if we get CopyOutResponse), handle more gracefully
        var copyOutResponse = _connector.ReadExpecting<CopyOutResponseMessage>();
        if (!copyOutResponse.IsBinary)
            throw new ArgumentException("copyToCommand triggered a text transfer, only binary is allowed", nameof(copyToCommand));
        NumColumns = copyOutResponse.NumColumns;
        ReadHeader();
    }
    catch
    {
        _connector.Break();
        throw;
    }
}
/// <summary>
/// Cancels and terminates an ongoing operation. Any data already written will be discarded.
/// </summary>
public void Cancel()
{
    CheckDisposed();

    if (CanWrite)
    {
        _isDisposed = true;
        _buf.Clear();
        _connector.SendSingleMessage(new CopyFailMessage());
        try
        {
            var msg = _connector.ReadSingleMessage();
            // The CopyFail should immediately trigger an exception from the read above.
            _connector.Break();
            throw new Exception("Expected ErrorResponse when cancelling COPY but got: " + msg.Code);
        }
        catch (NpgsqlException e)
        {
            if (e.Code == "57014")
                return;
            throw;
        }
    }
    else
    {
        _connector.CancelRequest();
    }
}
async Task Cancel(bool async, CancellationToken cancellationToken = default)
{
    _state = ImporterState.Cancelled;
    _buf.Clear();
    _buf.EndCopyMode();
    await _connector.WriteCopyFail(async, cancellationToken);
    await _connector.Flush(async, cancellationToken);
    try
    {
        var msg = await _connector.ReadMessage(async, cancellationToken);
        // The CopyFail should immediately trigger an exception from the read above.
        throw _connector.Break(
            new NpgsqlException("Expected ErrorResponse when cancelling COPY but got: " + msg.Code));
    }
    catch (PostgresException e)
    {
        if (e.SqlState != PostgresErrorCodes.QueryCanceled)
            throw;
    }
}
internal NpgsqlRawCopyStream(NpgsqlConnector connector, string copyCommand)
{
    _connector = connector;
    _buf = connector.Buffer;
    _connector.SendSingleMessage(new QueryMessage(copyCommand));
    var msg = _connector.ReadSingleMessage();
    switch (msg.Code)
    {
        case BackendMessageCode.CopyInResponse:
            var copyInResponse = (CopyInResponseMessage)msg;
            IsBinary = copyInResponse.IsBinary;
            _canWrite = true;
            break;
        case BackendMessageCode.CopyOutResponse:
            var copyOutResponse = (CopyOutResponseMessage)msg;
            IsBinary = copyOutResponse.IsBinary;
            _canRead = true;
            break;
        default:
            _connector.Break();
            throw new Exception("Unexpected message received when expecting CopyInResponse or CopyOutResponse: " + msg.Code);
    }
    _connector.State = ConnectorState.Copy;
}
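// For context, the public entry point to the stream above is NpgsqlConnection.BeginRawBinaryCopy.
// A minimal usage sketch, not taken from the snippets above; the table name, file names and
// connString variable are placeholders.
using (var conn = new NpgsqlConnection(connString))
{
    conn.Open();

    // Export: raw binary COPY data flows from PostgreSQL into a local file
    using (var inStream = conn.BeginRawBinaryCopy("COPY my_table TO STDOUT (FORMAT BINARY)"))
    using (var outFile = File.Create("my_table.bin"))
        inStream.CopyTo(outFile);

    // Import: the same raw bytes can later be streamed back into the table
    using (var inFile = File.OpenRead("my_table.bin"))
    using (var outStream = conn.BeginRawBinaryCopy("COPY my_table FROM STDIN (FORMAT BINARY)"))
        inFile.CopyTo(outStream);
}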
internal NpgsqlBinaryImporter(NpgsqlConnector connector, string copyFromCommand)
{
    _connector = connector;
    _buf = connector.WriteBuffer;
    _registry = connector.TypeHandlerRegistry;
    _lengthCache = new LengthCache();
    _column = -1;
    _dummyParam = new NpgsqlParameter();

    try
    {
        _connector.SendQuery(copyFromCommand);

        // TODO: Failure will break the connection (e.g. if we get CopyOutResponse), handle more gracefully
        var copyInResponse = _connector.ReadExpecting<CopyInResponseMessage>();
        if (!copyInResponse.IsBinary)
            throw new ArgumentException("copyFromCommand triggered a text transfer, only binary is allowed", nameof(copyFromCommand));
        NumColumns = copyInResponse.NumColumns;
        WriteHeader();

        // We will be sending CopyData messages from now on, deduct the header from the buffer's usable size
        _buf.UsableSize -= 5;
    }
    catch
    {
        _connector.Break();
        throw;
    }
}
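// The importer built by this constructor is exposed through NpgsqlConnection.BeginBinaryImport.
// A minimal usage sketch, assuming a hypothetical table my_table(id int, name text) and an open
// connection; the exact surface varies slightly between Npgsql versions (older ones complete the
// import on Dispose rather than via Complete()). Requires using NpgsqlTypes; for NpgsqlDbType.
using (var conn = new NpgsqlConnection(connString))
{
    conn.Open();
    using (var writer = conn.BeginBinaryImport("COPY my_table (id, name) FROM STDIN (FORMAT BINARY)"))
    {
        writer.StartRow();
        writer.Write(1, NpgsqlDbType.Integer);
        writer.Write("alice", NpgsqlDbType.Text);

        writer.StartRow();
        writer.Write(2, NpgsqlDbType.Integer);
        writer.WriteNull();                      // NULL column

        writer.Complete();                       // Commits the COPY in recent Npgsql versions
    }
}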
internal NpgsqlBinaryImporter(NpgsqlConnector connector, string copyFromCommand)
{
    _connector = connector;
    _buf = connector.Buffer;
    _registry = connector.TypeHandlerRegistry;
    _lengthCache = new LengthCache();
    _column = -1;
    _dummyParam = new NpgsqlParameter();

    try
    {
        _connector.SendSingleMessage(new QueryMessage(copyFromCommand));

        // TODO: Failure will break the connection (e.g. if we get CopyOutResponse), handle more gracefully
        var copyInResponse = _connector.ReadExpecting<CopyInResponseMessage>();
        if (!copyInResponse.IsBinary)
            throw new ArgumentException("copyFromCommand triggered a text transfer, only binary is allowed", "copyFromCommand");
        NumColumns = copyInResponse.NumColumns;
        WriteHeader();
    }
    catch
    {
        _connector.Break();
        throw;
    }
}
internal static T Expect<T>(IBackendMessage msg, NpgsqlConnector connector)
{
    if (msg is T asT)
        return asT;
    connector.Break();
    throw new NpgsqlException($"Received backend message {msg.Code} while expecting {typeof(T).Name}. Please file a bug.");
}
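// The helper above is used wherever the protocol state machine knows exactly which backend message
// must come next; anything else breaks the connector. A hedged usage sketch, assumed to run inside
// connector code where ReadMessage() and the connector instance are in scope - the call site is
// illustrative, not copied from Npgsql source.
var next = await ReadMessage(async);
var rfq = Expect<ReadyForQueryMessage>(next, this);   // Throws NpgsqlException and breaks the connector on any other message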
void DoWrite<T>(TypeHandler handler, [CanBeNull] T value)
{
    try
    {
        // We simulate the regular writing process with a validation/length calculation pass,
        // followed by a write pass
        _dummyParam.ConvertedValue = null;
        _lengthCache.Clear();
        handler.ValidateAndGetLength(value, ref _lengthCache, _dummyParam);
        _lengthCache.Rewind();
        handler.WriteWithLength(value, _buf, _lengthCache, _dummyParam, false, CancellationToken.None);
        _column++;
    }
    catch
    {
        _connector.Break();
        Cleanup();
        throw;
    }
}
public void Write(ReadOnlySpan<byte> buffer)
{
    CheckDisposed();
    if (!CanWrite)
        throw new InvalidOperationException("Stream not open for writing");
    if (buffer.Length == 0)
        return;

    if (buffer.Length <= _writeBuf.WriteSpaceLeft)
    {
        _writeBuf.WriteBytes(buffer);
        return;
    }

    try
    {
        // Value is too big, flush.
        Flush();

        if (buffer.Length <= _writeBuf.WriteSpaceLeft)
        {
            _writeBuf.WriteBytes(buffer);
            return;
        }

        // Value is too big even after a flush - bypass the buffer and write directly.
        _writeBuf.DirectWrite(buffer);
    }
    catch (Exception e)
    {
        _connector.Break(e);
        Cleanup();
        throw;
    }
}
internal async Task Init(string copyFromCommand, bool async, CancellationToken cancellationToken = default)
{
    await _connector.WriteQuery(copyFromCommand, async, cancellationToken);
    await _connector.Flush(async, cancellationToken);

    using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false);

    CopyInResponseMessage copyInResponse;
    var msg = await _connector.ReadMessage(async);
    switch (msg.Code)
    {
        case BackendMessageCode.CopyInResponse:
            copyInResponse = (CopyInResponseMessage)msg;
            if (!copyInResponse.IsBinary)
            {
                throw _connector.Break(
                    new ArgumentException("copyFromCommand triggered a text transfer, only binary is allowed",
                        nameof(copyFromCommand)));
            }
            break;
        case BackendMessageCode.CommandComplete:
            throw new InvalidOperationException(
                "This API only supports import/export from the client, i.e. COPY commands containing TO/FROM STDIN. " +
                "To import/export with files on your PostgreSQL machine, simply execute the command with ExecuteNonQuery. " +
                "Note that your data has been successfully imported/exported.");
        default:
            throw _connector.UnexpectedMessageReceived(msg.Code);
    }

    NumColumns = copyInResponse.NumColumns;
    _params = new NpgsqlParameter[NumColumns];
    _buf.StartCopyMode();
    WriteHeader();
}
public override void Write(byte[] buffer, int offset, int count)
{
    CheckDisposed();
    if (!CanWrite)
        throw new InvalidOperationException("Stream not open for writing");
    if (count == 0)
        return;

    if (count <= _writeBuf.WriteSpaceLeft)
    {
        _writeBuf.WriteBytes(buffer, offset, count);
        return;
    }

    try
    {
        // Value is too big, flush.
        Flush();

        if (count <= _writeBuf.WriteSpaceLeft)
        {
            _writeBuf.WriteBytes(buffer, offset, count);
            return;
        }

        // Value is too big even after a flush - bypass the buffer and write directly.
        _writeBuf.DirectWrite(buffer, offset, count);
    }
    catch
    {
        _connector.Break();
        Cleanup();
        throw;
    }
}
public override void Write(byte[] buffer, int offset, int count)
{
    CheckDisposed();
    if (!CanWrite)
        throw new InvalidOperationException("Stream not open for writing");
    if (count == 0)
        return;

    EnsureDataMessage();
    if (count <= _writeBuf.WriteSpaceLeft)
    {
        _writeBuf.WriteBytes(buffer, offset, count);
        return;
    }

    try
    {
        // Value is too big. Flush whatever is in the buffer, then write a new CopyData
        // directly with the buffer.
        Flush();

        _writeBuf.WriteByte((byte)BackendMessageCode.CopyData);
        _writeBuf.WriteInt32(count + 4);
        _writeBuf.Flush();
        _writeBuf.DirectWrite(buffer, offset, count);
        EnsureDataMessage();
    }
    catch
    {
        _connector.Break();
        Cleanup();
        throw;
    }
}
/// <summary>
/// Cancels and terminates an ongoing import. Any data already written will be discarded.
/// </summary>
public void Cancel()
{
    _isDisposed = true;
    _buf.Clear();
    _connector.SendSingleMessage(new CopyFailMessage());
    try
    {
        var msg = _connector.ReadSingleMessage();
        _connector.Break();
        throw new Exception("Expected ErrorResponse when cancelling COPY but got: " + msg.Code);
    }
    catch (NpgsqlException e)
    {
        if (e.Code == "57014")
            return;
        throw;
    }
}
internal NpgsqlBinaryImporter(NpgsqlConnector connector, string copyFromCommand)
{
    _connector = connector;
    _buf = connector.WriteBuffer;
    _registry = connector.TypeHandlerRegistry;
    _lengthCache = new LengthCache();
    _column = -1;
    _dummyParam = new NpgsqlParameter();

    try
    {
        _connector.SendQuery(copyFromCommand);

        CopyInResponseMessage copyInResponse;
        var msg = _connector.ReadMessage();
        switch (msg.Code)
        {
            case BackendMessageCode.CopyInResponse:
                copyInResponse = (CopyInResponseMessage)msg;
                if (!copyInResponse.IsBinary)
                    throw new ArgumentException("copyFromCommand triggered a text transfer, only binary is allowed", nameof(copyFromCommand));
                break;
            case BackendMessageCode.CompletedResponse:
                throw new InvalidOperationException(
                    "This API only supports import/export from the client, i.e. COPY commands containing TO/FROM STDIN. " +
                    "To import/export with files on your PostgreSQL machine, simply execute the command with ExecuteNonQuery. " +
                    "Note that your data has been successfully imported/exported.");
            default:
                throw _connector.UnexpectedMessageReceived(msg.Code);
        }

        NumColumns = copyInResponse.NumColumns;
        _buf.StartCopyMode();
        WriteHeader();
    }
    catch
    {
        _connector.Break();
        throw;
    }
}
internal NpgsqlBinaryExporter(NpgsqlConnector connector, string copyToCommand)
{
    _connector = connector;
    _buf = connector.ReadBuffer;
    _typeMapper = connector.TypeMapper;
    _columnLen = int.MinValue;   // Mark that the (first) column length hasn't been read yet
    _column = -1;

    try
    {
        _connector.SendQuery(copyToCommand);

        CopyOutResponseMessage copyOutResponse;
        var msg = _connector.ReadMessage();
        switch (msg.Code)
        {
            case BackendMessageCode.CopyOutResponse:
                copyOutResponse = (CopyOutResponseMessage)msg;
                if (!copyOutResponse.IsBinary)
                    throw new ArgumentException("copyToCommand triggered a text transfer, only binary is allowed", nameof(copyToCommand));
                break;
            case BackendMessageCode.CompletedResponse:
                throw new InvalidOperationException(
                    "This API only supports import/export from the client, i.e. COPY commands containing TO/FROM STDIN. " +
                    "To import/export with files on your PostgreSQL machine, simply execute the command with ExecuteNonQuery. " +
                    "Note that your data has been successfully imported/exported.");
            default:
                throw _connector.UnexpectedMessageReceived(msg.Code);
        }

        NumColumns = copyOutResponse.NumColumns;
        _typeHandlerCache = new NpgsqlTypeHandler[NumColumns];
        ReadHeader();
    }
    catch
    {
        _connector.Break();
        throw;
    }
}
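// The matching public entry point for the exporter above is NpgsqlConnection.BeginBinaryExport.
// A minimal usage sketch, again assuming a hypothetical table my_table(id int, name text) and a
// placeholder connString; not copied from Npgsql documentation or source.
using (var conn = new NpgsqlConnection(connString))
{
    conn.Open();
    using (var reader = conn.BeginBinaryExport("COPY my_table (id, name) TO STDOUT (FORMAT BINARY)"))
    {
        while (reader.StartRow() != -1)          // StartRow returns -1 once all rows have been read
        {
            var id = reader.Read<int>();
            var name = reader.IsNull ? null : reader.Read<string>();
            Console.WriteLine($"{id}: {name}");
        }
    }
}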
T DoRead<T>(TypeHandler handler)
{
    try
    {
        ReadColumnLenIfNeeded();
        if (_columnLen == -1)
            throw new InvalidCastException("Column is null");

        var result = handler.ReadFully<T>(_buf, _columnLen);
        _leftToReadInDataMsg -= _columnLen;
        _columnLen = int.MinValue;   // Mark that the (next) column length hasn't been read yet
        _column++;
        return result;
    }
    catch
    {
        _connector.Break();
        Cleanup();
        throw;
    }
}
void Cancel()
{
    _state = ImporterState.Cancelled;
    _buf.Clear();
    _buf.EndCopyMode();
    _connector.SendMessage(new CopyFailMessage());
    try
    {
        var msg = _connector.ReadMessage();
        // The CopyFail should immediately trigger an exception from the read above.
        _connector.Break();
        throw new NpgsqlException("Expected ErrorResponse when cancelling COPY but got: " + msg.Code);
    }
    catch (PostgresException e)
    {
        if (e.SqlState != "57014")
            throw;
    }
}
T DoRead<T>(NpgsqlTypeHandler handler)
{
    try
    {
        ReadColumnLenIfNeeded();
        if (_columnLen == -1)
            throw new InvalidCastException("Column is null");

        // If we know the entire column is already in memory, use the code path without async
        var result = _columnLen <= _buf.ReadBytesLeft
            ? handler.Read<T>(_buf, _columnLen)
            : handler.Read<T>(_buf, _columnLen, false).GetAwaiter().GetResult();

        _leftToReadInDataMsg -= _columnLen;
        _columnLen = int.MinValue;   // Mark that the (next) column length hasn't been read yet
        _column++;
        return result;
    }
    catch
    {
        _connector.Break();
        Cleanup();
        throw;
    }
}
void DoWrite<T>(TypeHandler handler, T value)
{
    try
    {
        if (_buf.WriteSpaceLeft < 4)
            FlushAndStartDataMessage();

        var asObject = (object)value;   // TODO: Implement boxless writing in the future
        if (asObject == null)
        {
            _buf.WriteInt32(-1);
            _column++;
            return;
        }

        _dummyParam.ConvertedValue = null;

        var asSimple = handler as ISimpleTypeWriter;
        if (asSimple != null)
        {
            var len = asSimple.ValidateAndGetLength(asObject, _dummyParam);
            _buf.WriteInt32(len);
            if (_buf.WriteSpaceLeft < len)
            {
                Contract.Assume(_buf.Size >= len);
                FlushAndStartDataMessage();
            }
            asSimple.Write(asObject, _buf, _dummyParam);
            _column++;
            return;
        }

        var asChunking = handler as IChunkingTypeWriter;
        if (asChunking != null)
        {
            _lengthCache.Clear();
            var len = asChunking.ValidateAndGetLength(asObject, ref _lengthCache, _dummyParam);
            _buf.WriteInt32(len);

            // If the type handler used the length cache, rewind it to skip the first position:
            // it contains the entire value length which we already have in len.
            if (_lengthCache.Position > 0)
            {
                _lengthCache.Rewind();
                _lengthCache.Position++;
            }

            asChunking.PrepareWrite(asObject, _buf, _lengthCache, _dummyParam);
            var directBuf = new DirectBuffer();
            while (!asChunking.Write(ref directBuf))
            {
                Flush();

                // The following is an optimization hack for writing large byte arrays without passing
                // through our buffer
                if (directBuf.Buffer != null)
                {
                    len = directBuf.Size == 0 ? directBuf.Buffer.Length : directBuf.Size;
                    _buf.WritePosition = 1;
                    _buf.WriteInt32(len + 4);
                    _buf.Flush();
                    _writingDataMsg = false;
                    _buf.Underlying.Write(directBuf.Buffer, directBuf.Offset, len);
                    directBuf.Buffer = null;
                    directBuf.Size = 0;
                }
                EnsureDataMessage();
            }

            _column++;
            return;
        }

        throw PGUtil.ThrowIfReached();
    }
    catch
    {
        _connector.Break();
        Cleanup();
        throw;
    }
}
T DoRead<T>(TypeHandler handler)
{
    try
    {
        ReadColumnLenIfNeeded();
        if (_columnLen == -1)
            throw new InvalidCastException("Column is null");

        // TODO: Duplication with NpgsqlDataReader.GetFieldValueInternal
        T result;

        // The type handler supports the requested type directly
        var tHandler = handler as ITypeHandler<T>;
        if (tHandler != null)
        {
            result = handler.Read<T>(_buf, _columnLen, false).Result;
        }
        else
        {
            var t = typeof(T);
            if (!t.IsArray)
                throw new InvalidCastException($"Can't cast database type {handler.PgDisplayName} to {typeof(T).Name}");

            // Getting an array.
            // We need to treat this as an actual array type; arrays need special treatment because of
            // typing/generics reasons (there is no way to express "array of X" with generics).
            var elementType = t.GetElementType();
            var arrayHandler = handler as ArrayHandler;
            if (arrayHandler == null)
                throw new InvalidCastException($"Can't cast database type {handler.PgDisplayName} to {typeof(T).Name}");

            if (arrayHandler.GetElementFieldType() == elementType)
                result = (T)handler.ReadAsObject(_buf, _columnLen, false).Result;
            else if (arrayHandler.GetElementPsvType() == elementType)
                result = (T)handler.ReadPsvAsObject(_buf, _columnLen, false).Result;
            else
                throw new InvalidCastException($"Can't cast database type {handler.PgDisplayName} to {typeof(T).Name}");
        }

        _leftToReadInDataMsg -= _columnLen;
        _columnLen = int.MinValue;   // Mark that the (next) column length hasn't been read yet
        _column++;
        return result;
    }
    catch
    {
        _connector.Break();
        Cleanup();
        throw;
    }
}
internal Task Ensure(int count, bool async, bool dontBreakOnTimeouts)
{
    return count <= ReadBytesLeft ? PGUtil.CompletedTask : EnsureLong();

    async Task EnsureLong()
    {
        Debug.Assert(count <= Size);
        Debug.Assert(count > ReadBytesLeft);
        count -= ReadBytesLeft;
        if (count <= 0)
            return;

        if (ReadPosition == FilledBytes)
            Clear();
        else if (count > Size - FilledBytes)
        {
            Array.Copy(Buffer, ReadPosition, Buffer, 0, ReadBytesLeft);
            FilledBytes = ReadBytesLeft;
            ReadPosition = 0;
        }

        try
        {
            while (count > 0)
            {
                var toRead = Size - FilledBytes;

                int read;
                if (async)
                {
                    if (AwaitableSocket == null)   // SSL
                    {
                        read = await Underlying.ReadAsync(Buffer, FilledBytes, toRead);
                    }
                    else   // Non-SSL async I/O, optimized
                    {
                        AwaitableSocket.SetBuffer(Buffer, FilledBytes, toRead);
                        await AwaitableSocket.ReceiveAsync();
                        read = AwaitableSocket.BytesTransferred;
                    }
                }
                else   // Sync I/O
                {
                    read = Underlying.Read(Buffer, FilledBytes, toRead);
                }

                if (read == 0)
                    throw new EndOfStreamException();
                count -= read;
                FilledBytes += read;
            }
        }
        // We have a special case when reading async notifications - a timeout may be normal
        // and shouldn't be fatal.
        // Note that mono throws SocketException with the wrong error (see #1330)
        catch (IOException e) when (
            dontBreakOnTimeouts &&
            (e.InnerException as SocketException)?.SocketErrorCode ==
                (Type.GetType("Mono.Runtime") == null ? SocketError.TimedOut : SocketError.WouldBlock)
        )
        {
            throw new TimeoutException("Timeout while reading from stream");
        }
        catch (Exception e)
        {
            Connector.Break();
            throw new NpgsqlException("Exception while reading from stream", e);
        }
    }
}
public async Task Flush(bool async)
{
    if (_copyMode)
    {
        // In copy mode, we write CopyData messages. The message code has already been
        // written to the beginning of the buffer, but we need to go back and write the
        // length.
        if (WritePosition == 1)
            return;
        var pos = WritePosition;
        WritePosition = 1;
        WriteInt32(pos - 1);
        WritePosition = pos;
    }
    else if (WritePosition == 0)
        return;

    try
    {
        if (async)
        {
            if (AwaitableSocket == null)   // SSL
            {
                await Underlying.WriteAsync(Buffer, 0, WritePosition);
            }
            else   // Non-SSL async I/O, optimized
            {
                AwaitableSocket.SetBuffer(Buffer, 0, WritePosition);
                await AwaitableSocket.SendAsync();
            }
        }
        else   // Sync I/O
        {
            Underlying.Write(Buffer, 0, WritePosition);
        }
    }
    catch (Exception e)
    {
        Connector.Break();
        throw new NpgsqlException("Exception while writing to stream", e);
    }

    try
    {
        if (async)
            await Underlying.FlushAsync();
        else
            Underlying.Flush();
    }
    catch (Exception e)
    {
        Connector.Break();
        throw new NpgsqlException("Exception while flushing stream", e);
    }

    NpgsqlEventSource.Log.BytesWritten(WritePosition);
    //NpgsqlEventSource.Log.RequestFailed();

    WritePosition = 0;

    if (CurrentCommand != null)
    {
        CurrentCommand.FlushOccurred = true;
        CurrentCommand = null;
    }

    if (_copyMode)
        WriteCopyDataHeader();
}
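// In copy mode the buffer keeps the one-byte CopyData message code at position 0 and the Int32
// length at positions 1-4, which is why the value written back above is WritePosition - 1: per the
// PostgreSQL wire protocol, the length field of a CopyData ('d') message counts itself plus the
// payload, but not the code byte. The standalone sketch below frames a payload the same way; it is
// an illustration of the protocol framing, not Npgsql API.
static byte[] FrameCopyData(byte[] payload)
{
    var frame = new byte[1 + 4 + payload.Length];
    frame[0] = (byte)'d';                        // CopyData message code
    var len = 4 + payload.Length;                // Length counts itself, not the code byte
    frame[1] = (byte)(len >> 24);                // Network (big-endian) byte order
    frame[2] = (byte)(len >> 16);
    frame[3] = (byte)(len >> 8);
    frame[4] = (byte)len;
    Array.Copy(payload, 0, frame, 5, payload.Length);
    return frame;
}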