public DocumentDataContainerEnumerator_BulkPkScan(
    int untrimmedCount,
    DriverRowData rowData,
    DocumentDataContainer dataContainer,
    List<FieldMetadata> fields,
    IDriverDataEnumerator inputDataEnumerator)
    : base(untrimmedCount, rowData, dataContainer, fields, fields.Count - 1)
{
    if (inputDataEnumerator == null)
    {
        throw new ArgumentNullException("inputDataEnumerator");
    }

    m_inputEnumerator = inputDataEnumerator;

    ReadStructureAndTakeLocks();
}
public void AttachInputDataEnumerator(IDriverDataEnumerator inputDataEnumerator)
{
    if (inputDataEnumerator == null)
    {
        throw new ArgumentNullException("inputDataEnumerator");
    }

    if (InputDataEnumerator != null)
    {
        throw new InvalidOperationException("Cannot reassign input data enumerator");
    }

    InputDataEnumerator = inputDataEnumerator;
}
private bool WriteItems(
    RequestExecutionContext context,
    RequestExecutionBuffer buffer,
    IDriverDataEnumerator sourceEnumerator,
    bool havePendingDriverRow)
{
    // have to check for disposal here, because this method gets invoked in a loop
    if (m_disposed)
    {
        buffer.Error = new ObjectDisposedException("This data engine has been disposed");
        return false;
    }

    if (sourceEnumerator == null)
    {
        return false;
    }

    var stream = buffer.Stream;
    var writer = buffer.Writer;
    var cts = context.CancellationTokenSource;

    buffer.RowsOutput = 0;

    var hasPendingWrite = false;
    var mustReturnDataset = context.Request.ReturnDataset;
    var lastValidLength = stream.Length;
    var totalRowsProduced = context.TotalRowsProduced;
    var recordsAffected = context.RecordsAffected;
    var rowsOutputLocally = 0;

    var func = context.ParsedRequest.BaseDataset.Paging.Offset;
    var pagingOffset = ReferenceEquals(func, null) ? 0 : func(context.ParsedRequest.Params.InputValues);

    func = context.ParsedRequest.BaseDataset.Paging.PageSize;
    var pageSize = ReferenceEquals(func, null) ? Int32.MaxValue : func(context.ParsedRequest.Params.InputValues);

    // one row might not have fit into the previous buffer, let's write it now
    if (havePendingDriverRow)
    {
        try
        {
            // no need to apply paging and where clause for pending row: we applied them already
            // also expect that a buffer can accommodate at least one row
            // - this requires that rows are never larger than RequestExecutionBuffer.MaxBytesPerBuffer
            ProduceOutputRow(context);
            context.OutputDataBuffer.Write(writer);

            rowsOutputLocally++;
            lastValidLength = stream.Length;
        }
        catch (Exception e)
        {
            throw new Exception("Internal buffer may be too small to fit even a single data row", e);
        }
    }

    // now let's deal with remaining items in the enumerator
    while (lastValidLength < RequestExecutionBuffer.MaxBytesPerBuffer && !cts.IsCancellationRequested)
    {
        if (recordsAffected >= pageSize || !sourceEnumerator.MoveNext())
        {
            // enough rows accumulated, or no more rows from driver: halt
            break;
        }

        // if record satisfies WHERE criteria, it gets counted into pre-paging total
        if (ApplyWhereClause(context))
        {
            var isAccumulating = totalRowsProduced >= pagingOffset;
            totalRowsProduced++;

            if (isAccumulating)
            {
                // once WHERE has been evaluated, read all remaining fields for this row into the buffer
                sourceEnumerator.FetchAdditionalFields();

                // output row number is needed for "rownumoutput()" Pql function
                context.ClauseEvaluationContext.RowNumberInOutput = recordsAffected;

                // increment counter BEFORE producing and writing
                // even if we fail to write into current buffer, we'll write into next one
                recordsAffected++;

                // this will be false when clients do ExecuteNonQuery or Prepare
                if (mustReturnDataset)
                {
                    // produce SELECT (output scheme) from FROM (raw data from storage driver)
                    var estimatedSize = ProduceOutputRow(context);

                    if (lastValidLength + estimatedSize > RequestExecutionBuffer.MaxBytesPerBuffer)
                    {
                        // store pending write and return
                        hasPendingWrite = true;
                        break;
                    }

                    // MemoryStream will throw NotSupportedException when trying to expand beyond fixed buffer size
                    // this should never happen (see check above)
                    context.OutputDataBuffer.Write(writer);

                    // this counter gets incremented AFTER writing,
                    // because it indicates how many rows have in fact been put into current block
                    rowsOutputLocally++;
                }

                lastValidLength = stream.Length;
            }

            // row number is needed for "rownum()" Pql function
            context.ClauseEvaluationContext.RowNumber++;
        }
    }

    buffer.RowsOutput = rowsOutputLocally;
    context.RecordsAffected = recordsAffected;
    context.TotalRowsProduced = totalRowsProduced;

    stream.Seek(0, SeekOrigin.Begin);
    return hasPendingWrite;
}
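The paging accounting used in WriteItems can be summarized in isolation: rows that pass the WHERE clause are counted into a pre-paging total, output only begins once that total has passed the paging offset, and emission stops after a full page. Below is a minimal, self-contained sketch of that accounting only; the PagingSketch class and ApplyPaging helper are hypothetical illustrations, not part of the engine.

using System;
using System.Collections.Generic;
using System.Linq;

static class PagingSketch
{
    // Emits only the rows that fall into the requested page window.
    public static IEnumerable<T> ApplyPaging<T>(
        IEnumerable<T> matchingRows, long pagingOffset, long pageSize)
    {
        long totalRowsProduced = 0;   // rows that satisfied WHERE, counted pre-paging
        long recordsAffected = 0;     // rows actually emitted to the client

        foreach (var row in matchingRows)
        {
            if (recordsAffected >= pageSize)
            {
                yield break;          // page is full, same as the pageSize check in WriteItems
            }

            var isAccumulating = totalRowsProduced >= pagingOffset;
            totalRowsProduced++;

            if (isAccumulating)
            {
                recordsAffected++;
                yield return row;
            }
        }
    }

    static void Main()
    {
        // rows 1..10 matched WHERE; offset 3, page size 4 => rows 4, 5, 6, 7
        Console.WriteLine(string.Join(",", ApplyPaging(Enumerable.Range(1, 10), 3, 4)));
    }
}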
private void StartProduction(
    RequestExecutionContext context,
    RequestExecutionBuffer buffer,
    out IDriverDataEnumerator sourceEnumerator)
{
    ReadRequest(context);

    context.AttachContainerDescriptor(m_containerDescriptor);

    if (!context.CacheInfo.HaveParsingResults)
    {
        lock (context.CacheInfo)
        {
            context.CacheInfo.CheckIsError();

            if (!context.CacheInfo.HaveParsingResults)
            {
                try
                {
                    ParseRequest(context.Request, context.RequestBulk, context.CacheInfo.ParsedRequest, context.CancellationTokenSource.Token);
                    CompileClauses(context.ContainerDescriptor, context.CacheInfo);

                    Thread.MemoryBarrier();
                    context.CacheInfo.HaveParsingResults = true;
                }
                catch (Exception e)
                {
                    // make sure that partially complete results do not become visible
                    context.CacheInfo.IsError(e);
                    throw;
                }
            }
        }
    }

    context.CacheInfo.WriteParsingResults(context.ParsedRequest);

    if (context.ParsedRequest.SpecialCommand.IsSpecialCommand)
    {
        sourceEnumerator = null;
        ExecuteSpecialCommandStatement(context, buffer);
        return;
    }

    // structure of output buffer depends on which fields client is asking for
    // therefore, we re-create and re-attach a driver output buffer for every request
    context.AttachDriverOutputBufferAndInputParameters(
        QueryParser.CreateDriverRowDataBuffer(context.ParsedRequest.BaseDataset.BaseFields), context.ParsedRequest);

    // this enumerator will yield our own driverOutputBuffer for every source record
    // e.g. the very same context.DriverOutputBuffer is going to be yielded N times from this enumerator
    if (context.ParsedRequest.StatementType == StatementType.Insert)
    {
        if (context.ParsedRequest.IsBulk)
        {
            sourceEnumerator = CreateInputDataEnumerator(context);
            //m_storageDriver.AllocateCapacityForDocumentType(context.ParsedRequest.TargetEntity.DocumentType, context.RequestBulk.InputItemsCount);
        }
        else
        {
            sourceEnumerator = CreatePseudoEnumeratorForInsertValues(context);
        }
    }
    else
    {
        if (context.ParsedRequest.IsBulk)
        {
            // for SELECT and DELETE, we only use PK values from the input enumerator
            // for UPDATE, we use both PK values and other field values from input enumerator
            context.AttachInputDataEnumerator(CreateInputDataEnumerator(context));
        }

        // driver returns set of rows related to given set of PK values
        // for a bulk request, sourceEnumerator will yield exactly one item for each item in input enumerator
        sourceEnumerator = m_storageDriver.GetData(context);
    }

    switch (context.ParsedRequest.StatementType)
    {
        case StatementType.Select:
            {
                context.AttachResponseHeaders(CreateResponseSchemeForSelect(context));
                context.PrepareBuffersForSelect();
                context.ResponseHeaders.RecordsAffected = 0;
            }
            break;
        case StatementType.Update:
            {
                context.AttachResponseHeaders(new DataResponse(0, "Update successful"));
                context.PrepareBuffersForUpdate();
                ExecuteInsertUpdateStatement(context, buffer, sourceEnumerator, DriverChangeType.Update);
                context.ResponseHeaders.RecordsAffected = context.RecordsAffected;
            }
            break;
        case StatementType.Delete:
            {
                context.AttachResponseHeaders(new DataResponse(0, "Delete successful"));
                context.PrepareBuffersForDelete();
                ExecuteDeleteStatement(context, buffer, sourceEnumerator);
                context.ResponseHeaders.RecordsAffected = context.RecordsAffected;
            }
            break;
        case StatementType.Insert:
            {
                context.AttachResponseHeaders(new DataResponse(0, "Insert successful"));
                context.PrepareChangeBufferForInsert();
                ExecuteInsertUpdateStatement(context, buffer, sourceEnumerator, DriverChangeType.Insert);
                context.ResponseHeaders.RecordsAffected = context.RecordsAffected;
            }
            break;
        default:
            throw new Exception("Invalid statement type: " + context.ParsedRequest.StatementType);
    }
}
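StartProduction caches parsing and clause-compilation results with a double-checked pattern: an unlocked check of HaveParsingResults, a second check under the lock, and a memory barrier so the flag only becomes visible after the results themselves. A minimal sketch of that publication idiom follows; the ParsedRequestCacheSketch type and its members are hypothetical stand-ins, not the engine's CacheInfo API.

using System;
using System.Threading;

sealed class ParsedRequestCacheSketch
{
    private readonly object m_lock = new object();
    private bool m_haveParsingResults;   // double-checked flag, set only after results are stored
    private string m_parsingResults;     // stands in for the parsed/compiled request

    public string GetOrParse(string requestText)
    {
        if (!m_haveParsingResults)             // first check, no lock taken
        {
            lock (m_lock)
            {
                if (!m_haveParsingResults)     // second check, under the lock
                {
                    var results = ExpensiveParse(requestText);

                    m_parsingResults = results;
                    Thread.MemoryBarrier();    // publish the results before the flag becomes visible
                    m_haveParsingResults = true;
                }
            }
        }

        return m_parsingResults;
    }

    private static string ExpensiveParse(string requestText)
    {
        // placeholder for request parsing and clause compilation
        return requestText.ToUpperInvariant();
    }
}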
private void ExecuteInsertUpdateStatement(RequestExecutionContext context, RequestExecutionBuffer buffer, IDriverDataEnumerator sourceEnumerator, DriverChangeType changeType)
{
    if (sourceEnumerator == null)
    {
        return;
    }

    buffer.RowsOutput = 0;

    var cts = context.CancellationTokenSource;
    var updates = context.ParsedRequest.Modify.UpdateAssignments;

    context.ClauseEvaluationContext.RowNumber = 0;
    context.ClauseEvaluationContext.RowNumberInOutput = context.ClauseEvaluationContext.RowNumber;

    var changeBuffer = context.ClauseEvaluationContext.ChangeBuffer;

    var changeset = m_storageDriver.CreateChangeset(changeBuffer, context.ParsedRequest.IsBulk);
    try
    {
        changeBuffer.ChangeType = changeType;

        while (!cts.IsCancellationRequested && sourceEnumerator.MoveNext())
        {
            // if record satisfies WHERE criteria, compute updated values and give them to driver
            if (ApplyWhereClause(context))
            {
                // make sure we have values for fields in SET expressions
                sourceEnumerator.FetchAdditionalFields();

                BitVector.SetAll(changeBuffer.Data.NotNulls, false);

                for (var ordinal = 0; ordinal < updates.Count; ordinal++)
                {
                    if (updates[ordinal].CompiledExpression != null)
                    {
                        updates[ordinal].CompiledExpression(context.ClauseEvaluationContext);
                    }
                }

                // this will either take internal entity id from current data row
                // or from the computed change buffer data (for non-bulk inserts)
                sourceEnumerator.FetchInternalEntityIdIntoChangeBuffer(changeBuffer, context);

                m_storageDriver.AddChange(changeset);
            }

            // row number is needed for "rownum()" Pql function
            context.ClauseEvaluationContext.RowNumber++;

            // output row number is needed for "rownumoutput()" Pql function
            context.ClauseEvaluationContext.RowNumberInOutput = context.ClauseEvaluationContext.RowNumber;
        }

        if (!cts.IsCancellationRequested)
        {
            context.RecordsAffected = m_storageDriver.Apply(changeset);
        }
    }
    catch
    {
        m_storageDriver.Discard(changeset);
        throw;
    }
}
private void ExecuteDeleteStatement(RequestExecutionContext context, RequestExecutionBuffer buffer, IDriverDataEnumerator sourceEnumerator)
{
    if (sourceEnumerator == null)
    {
        return;
    }

    buffer.RowsOutput = 0;

    var cts = context.CancellationTokenSource;

    context.ClauseEvaluationContext.RowNumber = 0;
    context.ClauseEvaluationContext.RowNumberInOutput = context.ClauseEvaluationContext.RowNumber;

    var changeBuffer = context.ClauseEvaluationContext.ChangeBuffer;

    var changeset = m_storageDriver.CreateChangeset(changeBuffer, context.ParsedRequest.IsBulk);
    try
    {
        changeBuffer.ChangeType = DriverChangeType.Delete;

        while (!cts.IsCancellationRequested && sourceEnumerator.MoveNext())
        {
            // if record satisfies WHERE criteria, hand it to the driver for deletion
            if (ApplyWhereClause(context))
            {
                // load internal ID, it is needed
                sourceEnumerator.FetchAdditionalFields();
                sourceEnumerator.FetchInternalEntityIdIntoChangeBuffer(changeBuffer, context);

                m_storageDriver.AddChange(changeset);
            }

            // row number is needed for "rownum()" Pql function
            context.ClauseEvaluationContext.RowNumber++;

            // output row number is needed for "rownumoutput()" Pql function
            context.ClauseEvaluationContext.RowNumberInOutput = context.ClauseEvaluationContext.RowNumber;
        }

        if (!cts.IsCancellationRequested)
        {
            context.RecordsAffected = m_storageDriver.Apply(changeset);
        }
    }
    catch
    {
        m_storageDriver.Discard(changeset);
        throw;
    }
}
/// <summary>
/// <see cref="ProducerThreadMethod"/> works in parallel with RPM's <see cref="RequestProcessingManager.WriteTo"/>.
/// RPM supplies empty buffers to be filled with data into <see cref="RequestExecutionContext.BuffersRing"/> and consumes them on the other end.
/// The data ring has a very limited number of buffers.
/// RPM is limited by network throughput and the Producer's speed.
/// The Producer is limited by the underlying storage driver, local processing speed and RPM's consumption of completed buffers.
/// The difference between the two: RPM's <see cref="RequestProcessingManager.WriteTo"/> is scheduled for execution by service infrastructure (WCF),
/// whereas <see cref="DataEngine.ProducerThreadMethod"/> is scheduled by RPM itself, when it invokes <see cref="IDataEngine.BeginExecution"/>.
/// </summary>
void ProducerThreadMethod(RequestExecutionContext context)
{
    PqlEngineSecurityContext.Set(new PqlClientSecurityContext(
        context.AuthContext.UserId, "dummy", context.AuthContext.TenantId, context.AuthContext.ContextId));

    var executionPending = true;
    IDriverDataEnumerator sourceEnumerator = null;

    try
    {
        // row number is needed for "rownum()" Pql function
        context.ClauseEvaluationContext.RowNumber = 0;

        // Our production is limited by the network throughput.
        // Production will also be aborted if the destination sink stops accepting.
        // In that case, ConsumingEnumerable will either throw or stop yielding.
        bool havePendingDriverRow = false;
        foreach (var buffer in context.BuffersRing.ConsumeProcessingTasks(context.CancellationTokenSource.Token))
        {
            buffer.Cleanup();

            try
            {
                if (executionPending)
                {
                    executionPending = false;

                    // read network protocol message
                    // parse-compile expressions and collect information for execution plan
                    // generate response headers and fetch data from Redis
                    // this place fails most often, because of Pql compilation or storage driver connectivity failures
                    StartProduction(context, buffer, out sourceEnumerator);

                    if (context.Request.ReturnDataset)
                    {
                        // write response headers BEFORE the query processing is completed
                        // records affected and whatever other stats will be zero
                        Serializer.SerializeWithLengthPrefix(buffer.Stream, context.ResponseHeaders, PrefixStyle.Base128);
                    }
                }

                // go through retrieved data
                havePendingDriverRow = ((DataEngine)context.Engine).WriteItems(
                    context, buffer, sourceEnumerator, havePendingDriverRow);

                // some consistency checks
                if (context.Request.ReturnDataset)
                {
                    if (havePendingDriverRow && buffer.RowsOutput == 0)
                    {
                        throw new Exception("Internal error: should not have pending row when no data is produced");
                    }
                }
                else
                {
                    if (havePendingDriverRow)
                    {
                        throw new Exception("Internal error: should not have pending row when no dataset is requested");
                    }

                    if (buffer.Stream.Length > 0)
                    {
                        throw new Exception("Internal error: should not have written anything to stream when no dataset is requested");
                    }
                }

                // time to quit?
                // no dataset requested, don't have any more data, or enough rows accumulated for requested page of results?
                if (buffer.RowsOutput == 0 || buffer.IsFailed || !context.Request.ReturnDataset)
                {
                    if (!context.Request.ReturnDataset)
                    {
                        // if there is no dataset sent, write response headers AFTER processing the query
                        // records affected and whatever other stats are meaningful
                        context.ResponseHeaders.RecordsAffected = context.RecordsAffected;
                        Serializer.SerializeWithLengthPrefix(buffer.Stream, context.ResponseHeaders, PrefixStyle.Base128);
                    }

                    break;
                }
            }
            catch (Exception e)
            {
                buffer.Error = e;
                context.TrySetLastError(e);
                m_tracer.Exception(e);

                // this will go to client, and overwrite whatever we managed to put into buffer before failure
                using (var writer = new PqlErrorDataWriter(1, e, false))
                {
                    buffer.Stream.SetLength(0);
                    writer.WriteTo(buffer.Stream);
                }

                return;
            }
            finally
            {
                // return the buffer back to the ring in any case
                context.BuffersRing.ReturnCompletedTask(buffer);
            }
        }
    }
    catch (OperationCanceledException e)
    {
        context.Cancel(e);
    }
    catch (Exception e)
    {
        if (Environment.HasShutdownStarted)
        {
            // nobody cares now
            return;
        }

        var cts = context.CancellationTokenSource;
        if (cts != null && !cts.IsCancellationRequested)
        {
            m_tracer.Exception(e);
            context.Cancel(e);
        }
    }
    finally
    {
        var ring = context.BuffersRing;
        if (ring != null)
        {
            ring.CompleteAddingCompletedTasks();
        }

        if (sourceEnumerator != null)
        {
            // release driver-level resources & locks
            sourceEnumerator.Dispose();
        }
    }
}
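For illustration only, here is a much-simplified sketch of the buffer-ring handoff that the summary above describes: a producer task takes empty buffers from a small bounded ring, fills them, and hands them to a consumer that plays the role of RequestProcessingManager.WriteTo. The BlockingCollection-based ring, the DataBuffer type, and all names below are hypothetical stand-ins, not the engine's actual BuffersRing API.

using System;
using System.Collections.Concurrent;
using System.Threading;
using System.Threading.Tasks;

sealed class DataBuffer { public int Rows; }

static class BufferRingSketch
{
    static void Main()
    {
        var cts = new CancellationTokenSource();

        // a small fixed-capacity ring: the producer blocks when the consumer falls behind
        var emptyBuffers = new BlockingCollection<DataBuffer>(boundedCapacity: 4);
        var completedBuffers = new BlockingCollection<DataBuffer>(boundedCapacity: 4);
        for (var i = 0; i < 4; i++) emptyBuffers.Add(new DataBuffer());

        var producer = Task.Run(() =>
        {
            for (var block = 0; block < 10; block++)
            {
                var buffer = emptyBuffers.Take(cts.Token);  // wait for an empty buffer
                buffer.Rows = block;                        // "fill" it with data
                completedBuffers.Add(buffer, cts.Token);    // hand it to the consumer
            }
            completedBuffers.CompleteAdding();              // no more data to produce
        });

        // consumer side: drain completed buffers and return them to the ring
        foreach (var buffer in completedBuffers.GetConsumingEnumerable(cts.Token))
        {
            Console.WriteLine($"wrote block with {buffer.Rows} rows to network");
            emptyBuffers.Add(buffer);
        }

        producer.Wait();
    }
}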
public IDriverDataEnumerator GetBulkUpdateEnumerator(List<FieldMetadata> fields, DriverRowData driverRow, IDriverDataEnumerator inputDataEnumerator)
{
    var untrimmedCount = m_untrimmedDocumentCount;
    if (untrimmedCount == 0)
    {
        return null;
    }

    return new DocumentDataContainerEnumerator_BulkPkScan(untrimmedCount, driverRow, this, fields, inputDataEnumerator);
}