public void BeginExecution(RequestExecutionContext context)
{
    CheckDisposed();
    m_utcLastUsedAt = DateTime.UtcNow;

    if (m_activeProcessors.ContainsKey(context))
    {
        throw new InvalidOperationException("This context is already being executed");
    }

    var task = new Task(() => ProducerThreadMethod(context), TaskCreationOptions.LongRunning);

    if (!m_activeProcessors.TryAdd(context, task))
    {
        throw new InvalidOperationException("Could not register executing task for this context");
    }

    try
    {
        task.Start();
    }
    catch
    {
        m_activeProcessors.TryRemove(context, out task);
        throw;
    }
}
public RequestCompletion(RequestExecutionContext executionContext)
{
    if (executionContext == null)
    {
        throw new ArgumentNullException("executionContext");
    }

    m_executionContext = executionContext;
}
public void FetchInternalEntityIdIntoChangeBuffer(DriverChangeBuffer changeBuffer, RequestExecutionContext context)
{
    if (changeBuffer.InternalEntityId == null)
    {
        changeBuffer.InternalEntityId = new byte[context.DriverOutputBuffer.InternalEntityId.Length];
    }

    ReadPrimaryKey(changeBuffer);
}
public RequestProcessingManager(ITracer tracer, IPqlEngineHostProcess process, RawDataWriterPerfCounters counters)
{
    if (tracer == null)
    {
        throw new ArgumentNullException("tracer");
    }

    if (process == null)
    {
        throw new ArgumentNullException("process");
    }

    if (counters == null)
    {
        throw new ArgumentNullException("counters");
    }

    m_counters = counters;
    m_executionContext = new RequestExecutionContext(process, tracer);
}
private static DataResponse CreateResponseSchemeForSelect(RequestExecutionContext context)
{
    var parsedRequest = context.ParsedRequest;
    var fields = new DataResponseField[parsedRequest.Select.OutputColumns.Count];
    var ordinal = 0;

    foreach (var clause in parsedRequest.Select.OutputColumns)
    {
        fields[ordinal] = new DataResponseField
            {
                DataType = clause.DbType,
                DisplayName = clause.Label,
                Name = clause.Label,
                Ordinal = ordinal
            };
        ordinal++;
    }

    return new DataResponse(fields);
}
private void StartProduction(
    RequestExecutionContext context, RequestExecutionBuffer buffer, out IDriverDataEnumerator sourceEnumerator)
{
    ReadRequest(context);
    context.AttachContainerDescriptor(m_containerDescriptor);

    if (!context.CacheInfo.HaveParsingResults)
    {
        lock (context.CacheInfo)
        {
            context.CacheInfo.CheckIsError();
            if (!context.CacheInfo.HaveParsingResults)
            {
                try
                {
                    ParseRequest(context.Request, context.RequestBulk, context.CacheInfo.ParsedRequest, context.CancellationTokenSource.Token);
                    CompileClauses(context.ContainerDescriptor, context.CacheInfo);

                    Thread.MemoryBarrier();
                    context.CacheInfo.HaveParsingResults = true;
                }
                catch (Exception e)
                {
                    // make sure that partially complete results do not become visible
                    context.CacheInfo.IsError(e);
                    throw;
                }
            }
        }
    }

    context.CacheInfo.WriteParsingResults(context.ParsedRequest);

    if (context.ParsedRequest.SpecialCommand.IsSpecialCommand)
    {
        sourceEnumerator = null;
        ExecuteSpecialCommandStatement(context, buffer);
        return;
    }

    // the structure of the output buffer depends on which fields the client is asking for,
    // therefore we re-create and re-attach a driver output buffer for every request
    context.AttachDriverOutputBufferAndInputParameters(
        QueryParser.CreateDriverRowDataBuffer(context.ParsedRequest.BaseDataset.BaseFields),
        context.ParsedRequest);

    // this enumerator will yield our own driverOutputBuffer for every source record,
    // i.e. the very same context.DriverOutputBuffer is going to be yielded N times from this enumerator
    if (context.ParsedRequest.StatementType == StatementType.Insert)
    {
        if (context.ParsedRequest.IsBulk)
        {
            sourceEnumerator = CreateInputDataEnumerator(context);
            //m_storageDriver.AllocateCapacityForDocumentType(context.ParsedRequest.TargetEntity.DocumentType, context.RequestBulk.InputItemsCount);
        }
        else
        {
            sourceEnumerator = CreatePseudoEnumeratorForInsertValues(context);
        }
    }
    else
    {
        if (context.ParsedRequest.IsBulk)
        {
            // for SELECT and DELETE, we only use PK values from the input enumerator;
            // for UPDATE, we use both PK values and other field values from the input enumerator
            context.AttachInputDataEnumerator(CreateInputDataEnumerator(context));
        }

        // the driver returns the set of rows related to the given set of PK values;
        // for a bulk request, sourceEnumerator will yield exactly one item for each item in the input enumerator
        sourceEnumerator = m_storageDriver.GetData(context);
    }

    switch (context.ParsedRequest.StatementType)
    {
        case StatementType.Select:
            {
                context.AttachResponseHeaders(CreateResponseSchemeForSelect(context));
                context.PrepareBuffersForSelect();
                context.ResponseHeaders.RecordsAffected = 0;
            }
            break;
        case StatementType.Update:
            {
                context.AttachResponseHeaders(new DataResponse(0, "Update successful"));
                context.PrepareBuffersForUpdate();
                ExecuteInsertUpdateStatement(context, buffer, sourceEnumerator, DriverChangeType.Update);
                context.ResponseHeaders.RecordsAffected = context.RecordsAffected;
            }
            break;
        case StatementType.Delete:
            {
                context.AttachResponseHeaders(new DataResponse(0, "Delete successful"));
                context.PrepareBuffersForDelete();
                ExecuteDeleteStatement(context, buffer, sourceEnumerator);
                context.ResponseHeaders.RecordsAffected = context.RecordsAffected;
            }
            break;
        case StatementType.Insert:
            {
                context.AttachResponseHeaders(new DataResponse(0, "Insert successful"));
                context.PrepareChangeBufferForInsert();
                ExecuteInsertUpdateStatement(context, buffer, sourceEnumerator, DriverChangeType.Insert);
                context.ResponseHeaders.RecordsAffected = context.RecordsAffected;
            }
            break;
        default:
            throw new Exception("Invalid statement type: " + context.ParsedRequest.StatementType);
    }
}
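// A minimal, self-contained sketch of the double-checked locking pattern StartProduction uses
// to populate the shared parse-results cache entry. The names CacheEntry and computeResults are
// illustrative, not part of the engine's API. Volatile.Write plays the same role as the
// Thread.MemoryBarrier call above: readers that observe HaveResults == true are guaranteed
// to also observe the fully built results.
using System;
using System.Threading;

class CacheEntrySketch
{
    private bool m_haveResults;
    public object Results;

    public bool HaveResults
    {
        get { return Volatile.Read(ref m_haveResults); }
    }

    public void Populate(Func<object> computeResults)
    {
        if (!HaveResults)
        {
            lock (this)
            {
                if (!HaveResults)
                {
                    Results = computeResults();

                    // publish the flag only after Results is fully built
                    Volatile.Write(ref m_haveResults, true);
                }
            }
        }
    }
}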
private void ExecuteSpecialCommandStatement(RequestExecutionContext context, RequestExecutionBuffer buffer)
{
    switch (context.ParsedRequest.SpecialCommand.CommandType)
    {
        case ParsedRequest.SpecialCommandData.SpecialCommandType.Defragment:
            context.AttachResponseHeaders(new DataResponse(0, "Defragmentation completed"));
            m_storageDriver.Compact(CompactionOptions.FullReindex);
            break;
        default:
            throw new ArgumentOutOfRangeException("context", context.ParsedRequest.SpecialCommand.CommandType, "Invalid special command");
    }
}
/// <summary> /// <see cref="ProducerThreadMethod"/> works in parallel with RPM's <see cref="RequestProcessingManager.WriteTo"/>. /// RPM supplies empty buffers to be filled with data into <see cref="RequestExecutionContext.BuffersRing"/> and consumes them on the other end. /// The data ring has very limited number of buffers. /// RPM is limited by network throughput and Producer's speed. /// Producer is limited by underlying storage driver, local processing speed and RPM's consumption of complete buffers. /// The difference between the two: RPM <see cref="RequestProcessingManager.WriteTo"/> is scheduled for execution by service infrastructure (WCF), /// whereas <see cref="DataEngine.ProducerThreadMethod"/> is scheduled by RPM itself, when it invokes <see cref="IDataEngine.BeginExecution"/>. /// </summary> void ProducerThreadMethod(RequestExecutionContext context) { PqlEngineSecurityContext.Set(new PqlClientSecurityContext( context.AuthContext.UserId, "dummy", context.AuthContext.TenantId, context.AuthContext.ContextId)); var executionPending = true; IDriverDataEnumerator sourceEnumerator = null; try { // row number is needed for "rownum()" Pql function context.ClauseEvaluationContext.RowNumber = 0; // Our production is limited by the network throughput. // Production will also be aborted if the destination sink stops accepting. // In that case, ConsumingEnumerable will either throw or stop yielding. bool havePendingDriverRow = false; foreach (var buffer in context.BuffersRing.ConsumeProcessingTasks(context.CancellationTokenSource.Token) ) { buffer.Cleanup(); try { if (executionPending) { executionPending = false; // read network protocol message // parse-compile expressions and collect information for execution plan // generate response headers and fetch data from Redis // this place fails most often, because of Pql compilation or storage driver connectivity failures StartProduction(context, buffer, out sourceEnumerator); if (context.Request.ReturnDataset) { // write response headers BEFORE the query processing is completed // records affected and whatever other stats will be zero Serializer.SerializeWithLengthPrefix(buffer.Stream, context.ResponseHeaders, PrefixStyle.Base128); } } // go through retrieved data havePendingDriverRow = ((DataEngine) context.Engine).WriteItems( context, buffer, sourceEnumerator, havePendingDriverRow); // some consistency checks if (context.Request.ReturnDataset) { if (havePendingDriverRow && buffer.RowsOutput == 0) { throw new Exception("Internal error: should not have pending row when no data is produced"); } } else { if (havePendingDriverRow) { throw new Exception("Internal error: should not have pending row when no dataset is requested"); } if (buffer.Stream.Length > 0) { throw new Exception("Internal error: should not have written anything to stream when no dataset is requested"); } } // time to quit? // no dataset requested, don't have any more data, or enough rows accumulated for requested page of results? 
if (buffer.RowsOutput == 0 || buffer.IsFailed || !context.Request.ReturnDataset) { if (!context.Request.ReturnDataset) { // if there is no dataset sent, write response headers AFTER processing the query // records affected and whatever other stats are meaningful context.ResponseHeaders.RecordsAffected = context.RecordsAffected; Serializer.SerializeWithLengthPrefix(buffer.Stream, context.ResponseHeaders, PrefixStyle.Base128); } break; } } catch (Exception e) { buffer.Error = e; context.TrySetLastError(e); m_tracer.Exception(e); // this will go to client, and overwrite whatever we managed to put into buffer before failure using (var writer = new PqlErrorDataWriter(1, e, false)) { buffer.Stream.SetLength(0); writer.WriteTo(buffer.Stream); } return; } finally { // return the buffer back to the ring in any case context.BuffersRing.ReturnCompletedTask(buffer); } } } catch (OperationCanceledException e) { context.Cancel(e); } catch (Exception e) { if (Environment.HasShutdownStarted) { // nobody cares now return; } var cts = context.CancellationTokenSource; if (cts != null && !cts.IsCancellationRequested) { m_tracer.Exception(e); context.Cancel(e); } } finally { var ring = context.BuffersRing; if (ring != null) { ring.CompleteAddingCompletedTasks(); } if (sourceEnumerator != null) { // release driver-level resources & locks sourceEnumerator.Dispose(); } } }
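// A minimal sketch of the two-queue buffer ring that ProducerThreadMethod and
// RequestProcessingManager.WriteTo communicate through, assuming the ring is conceptually a
// pair of bounded queues. This illustrates the throttling pattern only; it is not the engine's
// BuffersRing implementation, and all names here are placeholders.
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Threading;

class BufferRingSketch<TBuffer>
{
    private readonly BlockingCollection<TBuffer> m_empty;     // buffers awaiting production
    private readonly BlockingCollection<TBuffer> m_completed; // buffers awaiting consumption

    public BufferRingSketch(IEnumerable<TBuffer> buffers)
    {
        m_empty = new BlockingCollection<TBuffer>(new ConcurrentQueue<TBuffer>(buffers));
        m_completed = new BlockingCollection<TBuffer>();
    }

    // producer side: blocks when all buffers are in flight, which throttles production
    public IEnumerable<TBuffer> ConsumeProcessingTasks(CancellationToken cancellation)
    {
        return m_empty.GetConsumingEnumerable(cancellation);
    }

    // producer hands a filled buffer to the consumer
    public void ReturnCompletedTask(TBuffer buffer)
    {
        m_completed.Add(buffer);
    }

    // consumer side: blocks until the producer completes a buffer
    public TBuffer TakeCompleted(CancellationToken cancellation)
    {
        return m_completed.Take(cancellation);
    }

    // consumer recycles a drained buffer back to the producer
    public void ReturnEmpty(TBuffer buffer)
    {
        m_empty.Add(buffer);
    }

    public void CompleteAddingCompletedTasks()
    {
        m_completed.CompleteAdding();
    }
}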
private void ExecuteDeleteStatement(RequestExecutionContext context, RequestExecutionBuffer buffer, IDriverDataEnumerator sourceEnumerator)
{
    if (sourceEnumerator == null)
    {
        return;
    }

    buffer.RowsOutput = 0;
    var cts = context.CancellationTokenSource;

    context.ClauseEvaluationContext.RowNumber = 0;
    context.ClauseEvaluationContext.RowNumberInOutput = context.ClauseEvaluationContext.RowNumber;

    var changeBuffer = context.ClauseEvaluationContext.ChangeBuffer;
    var changeset = m_storageDriver.CreateChangeset(changeBuffer, context.ParsedRequest.IsBulk);
    try
    {
        changeBuffer.ChangeType = DriverChangeType.Delete;

        while (!cts.IsCancellationRequested && sourceEnumerator.MoveNext())
        {
            // if the record satisfies WHERE criteria, register its deletion with the driver
            if (ApplyWhereClause(context))
            {
                // load internal ID, it is needed
                sourceEnumerator.FetchAdditionalFields();
                sourceEnumerator.FetchInternalEntityIdIntoChangeBuffer(changeBuffer, context);
                m_storageDriver.AddChange(changeset);
            }

            // row number is needed for the "rownum()" Pql function
            context.ClauseEvaluationContext.RowNumber++;

            // output row number is needed for the "rownumoutput()" Pql function
            context.ClauseEvaluationContext.RowNumberInOutput = context.ClauseEvaluationContext.RowNumber;
        }

        if (!cts.IsCancellationRequested)
        {
            context.RecordsAffected = m_storageDriver.Apply(changeset);
        }
    }
    catch
    {
        m_storageDriver.Discard(changeset);
        throw;
    }
}
private void ExecuteInsertUpdateStatement(RequestExecutionContext context, RequestExecutionBuffer buffer, IDriverDataEnumerator sourceEnumerator, DriverChangeType changeType)
{
    if (sourceEnumerator == null)
    {
        return;
    }

    buffer.RowsOutput = 0;
    var cts = context.CancellationTokenSource;
    var updates = context.ParsedRequest.Modify.UpdateAssignments;

    context.ClauseEvaluationContext.RowNumber = 0;
    context.ClauseEvaluationContext.RowNumberInOutput = context.ClauseEvaluationContext.RowNumber;

    var changeBuffer = context.ClauseEvaluationContext.ChangeBuffer;
    var changeset = m_storageDriver.CreateChangeset(changeBuffer, context.ParsedRequest.IsBulk);
    try
    {
        changeBuffer.ChangeType = changeType;

        while (!cts.IsCancellationRequested && sourceEnumerator.MoveNext())
        {
            // if record satisfies WHERE criteria, compute updated values and give them to driver
            if (ApplyWhereClause(context))
            {
                // make sure we have values for fields in SET expressions
                sourceEnumerator.FetchAdditionalFields();

                BitVector.SetAll(changeBuffer.Data.NotNulls, false);

                for (var ordinal = 0; ordinal < updates.Count; ordinal++)
                {
                    if (updates[ordinal].CompiledExpression != null)
                    {
                        updates[ordinal].CompiledExpression(context.ClauseEvaluationContext);
                    }
                }

                // this will either take internal entity id from current data row
                // or from the computed change buffer data (for non-bulk inserts)
                sourceEnumerator.FetchInternalEntityIdIntoChangeBuffer(changeBuffer, context);
                m_storageDriver.AddChange(changeset);
            }

            // row number is needed for the "rownum()" Pql function
            context.ClauseEvaluationContext.RowNumber++;

            // output row number is needed for the "rownumoutput()" Pql function
            context.ClauseEvaluationContext.RowNumberInOutput = context.ClauseEvaluationContext.RowNumber;
        }

        if (!cts.IsCancellationRequested)
        {
            context.RecordsAffected = m_storageDriver.Apply(changeset);
        }
    }
    catch
    {
        m_storageDriver.Discard(changeset);
        throw;
    }
}
public void FetchInternalEntityIdIntoChangeBuffer(DriverChangeBuffer changeBuffer, RequestExecutionContext context)
{
    // reference copy is safe because storage driver is responsible for copying this value
    // when it reads the change buffer
    changeBuffer.InternalEntityId = context.DriverOutputBuffer.InternalEntityId;
}
private IDriverDataEnumerator CreatePseudoEnumeratorForInsertValues(RequestExecutionContext context)
{
    // generate a single logical entry and break;
    // actual values are computed as part of the universal insert/update routine
    return new SourcedEnumerator(
        DriverRowData.DeriveRepresentationType(context.ParsedRequest.TargetEntityPkField.DbType));
}
private static bool ApplyWhereClause(RequestExecutionContext context)
{
    return context.ParsedRequest.BaseDataset.WhereClauseProcessor == null
           || context.ParsedRequest.BaseDataset.WhereClauseProcessor(context.ClauseEvaluationContext);
}
/// <summary>
/// Takes a driver output row and produces a SELECT output row.
/// Returns an ESTIMATED upper bound for the byte size of the new SELECT output row.
/// </summary>
private static int ProduceOutputRow(RequestExecutionContext context)
{
    var output = context.OutputDataBuffer;
    var outputSize = output.GetMinimumSize();
    var ctx = context.ClauseEvaluationContext;

    BitVector.SetAll(output.NotNulls, false);

    for (var ordinal = 0; ordinal < context.ResponseHeaders.FieldCount; ordinal++)
    {
        var indexInArray = output.FieldArrayIndexes[ordinal];
        var isNullable = context.ParsedRequest.Select.OutputColumns[ordinal].IsNullable;
        var compiledExpression = context.ParsedRequest.Select.OutputColumns[ordinal].CompiledExpression;

        switch (output.FieldTypes[ordinal])
        {
            //case DbType.VarNumeric:
            //    break;
            case DbType.AnsiStringFixedLength:
            case DbType.StringFixedLength:
            case DbType.Xml:
            case DbType.AnsiString:
            case DbType.String:
                {
                    var value = ((Func<ClauseEvaluationContext, string>)compiledExpression)(ctx);
                    if (value != null)
                    {
                        output.StringData[indexInArray].SetLength(value.Length);
                        for (var i = 0; i < value.Length; i++)
                        {
                            output.StringData[indexInArray].Data[i] = value[i];
                        }

                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize += sizeof(int) + value.Length * sizeof(char);
                    }
                }
                break;

            case DbType.Binary:
            case DbType.Object:
                {
                    var data = ((Func<ClauseEvaluationContext, SizableArrayOfByte>)compiledExpression)(ctx);
                    if (data != null)
                    {
                        var len = data.Length;
                        output.BinaryData[indexInArray].SetLength(len);
                        if (len > 0)
                        {
                            Buffer.BlockCopy(data.Data, 0, output.BinaryData[indexInArray].Data, 0, len);
                        }

                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize += sizeof(int) + data.Length;
                    }
                }
                break;

            case DbType.Byte:
                if (isNullable)
                {
                    var value = ((Func<ClauseEvaluationContext, UnboxableNullable<Byte>>)compiledExpression)(ctx);
                    if (value.HasValue)
                    {
                        output.ValueData8Bytes[indexInArray].AsByte = value.Value;
                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize++;
                    }
                }
                else
                {
                    output.ValueData8Bytes[indexInArray].AsByte = ((Func<ClauseEvaluationContext, Byte>)compiledExpression)(ctx);
                    BitVector.Set(output.NotNulls, ordinal);
                    outputSize++;
                }
                break;

            case DbType.Boolean:
                if (isNullable)
                {
                    var value = ((Func<ClauseEvaluationContext, UnboxableNullable<Boolean>>)compiledExpression)(ctx);
                    if (value.HasValue)
                    {
                        output.ValueData8Bytes[indexInArray].AsBoolean = value.Value;
                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize++;
                    }
                }
                else
                {
                    output.ValueData8Bytes[indexInArray].AsBoolean = ((Func<ClauseEvaluationContext, Boolean>)compiledExpression)(ctx);
                    BitVector.Set(output.NotNulls, ordinal);
                    outputSize++;
                }
                break;

            case DbType.Currency:
            case DbType.Decimal:
                if (isNullable)
                {
                    var value = ((Func<ClauseEvaluationContext, UnboxableNullable<Decimal>>)compiledExpression)(ctx);
                    if (value.HasValue)
                    {
                        output.ValueData16Bytes[indexInArray].AsDecimal = value.Value;
                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize += sizeof(Decimal);
                    }
                }
                else
                {
                    output.ValueData16Bytes[indexInArray].AsDecimal = ((Func<ClauseEvaluationContext, Decimal>)compiledExpression)(ctx);
                    BitVector.Set(output.NotNulls, ordinal);
                    outputSize += sizeof(Decimal);
                }
                break;

            case DbType.Date:
            case DbType.DateTime:
            case DbType.DateTime2:
                if (isNullable)
                {
                    var value = ((Func<ClauseEvaluationContext, UnboxableNullable<DateTime>>)compiledExpression)(ctx);
                    if (value.HasValue)
                    {
                        output.ValueData8Bytes[indexInArray].AsDateTime = value.Value;
                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize += sizeof(Int64);
                    }
                }
                else
                {
                    output.ValueData8Bytes[indexInArray].AsDateTime = ((Func<ClauseEvaluationContext, DateTime>)compiledExpression)(ctx);
                    BitVector.Set(output.NotNulls, ordinal);
                    outputSize += sizeof(Int64);
                }
                break;

            case DbType.Time:
                if (isNullable)
                {
                    var value = ((Func<ClauseEvaluationContext, UnboxableNullable<TimeSpan>>)compiledExpression)(ctx);
                    if (value.HasValue)
                    {
                        output.ValueData8Bytes[indexInArray].AsTimeSpan = value.Value;
                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize += sizeof(Int64);
                    }
                }
                else
                {
                    output.ValueData8Bytes[indexInArray].AsTimeSpan = ((Func<ClauseEvaluationContext, TimeSpan>)compiledExpression)(ctx);
                    BitVector.Set(output.NotNulls, ordinal);
                    outputSize += sizeof(Int64);
                }
                break;

            case DbType.Double:
                if (isNullable)
                {
                    var value = ((Func<ClauseEvaluationContext, UnboxableNullable<Double>>)compiledExpression)(ctx);
                    if (value.HasValue)
                    {
                        output.ValueData8Bytes[indexInArray].AsDouble = value.Value;
                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize += sizeof(Double);
                    }
                }
                else
                {
                    output.ValueData8Bytes[indexInArray].AsDouble = ((Func<ClauseEvaluationContext, Double>)compiledExpression)(ctx);
                    BitVector.Set(output.NotNulls, ordinal);
                    outputSize += sizeof(Double);
                }
                break;

            case DbType.Guid:
                if (isNullable)
                {
                    var value = ((Func<ClauseEvaluationContext, UnboxableNullable<Guid>>)compiledExpression)(ctx);
                    if (value.HasValue)
                    {
                        output.ValueData16Bytes[indexInArray].AsGuid = value.Value;
                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize += 16;
                    }
                }
                else
                {
                    output.ValueData16Bytes[indexInArray].AsGuid = ((Func<ClauseEvaluationContext, Guid>)compiledExpression)(ctx);
                    BitVector.Set(output.NotNulls, ordinal);
                    outputSize += 16;
                }
                break;

            case DbType.Int16:
                if (isNullable)
                {
                    var value = ((Func<ClauseEvaluationContext, UnboxableNullable<Int16>>)compiledExpression)(ctx);
                    if (value.HasValue)
                    {
                        output.ValueData8Bytes[indexInArray].AsInt16 = value.Value;
                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize += sizeof(Int16);
                    }
                }
                else
                {
                    output.ValueData8Bytes[indexInArray].AsInt16 = ((Func<ClauseEvaluationContext, Int16>)compiledExpression)(ctx);
                    BitVector.Set(output.NotNulls, ordinal);
                    outputSize += sizeof(Int16);
                }
                break;

            case DbType.Int32:
                if (isNullable)
                {
                    var value = ((Func<ClauseEvaluationContext, UnboxableNullable<Int32>>)compiledExpression)(ctx);
                    if (value.HasValue)
                    {
                        output.ValueData8Bytes[indexInArray].AsInt32 = value.Value;
                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize += sizeof(Int32);
                    }
                }
                else
                {
                    output.ValueData8Bytes[indexInArray].AsInt32 = ((Func<ClauseEvaluationContext, Int32>)compiledExpression)(ctx);
                    BitVector.Set(output.NotNulls, ordinal);
                    outputSize += sizeof(Int32);
                }
                break;

            case DbType.Int64:
                if (isNullable)
                {
                    var value = ((Func<ClauseEvaluationContext, UnboxableNullable<Int64>>)compiledExpression)(ctx);
                    if (value.HasValue)
                    {
                        output.ValueData8Bytes[indexInArray].AsInt64 = value.Value;
                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize += sizeof(Int64);
                    }
                }
                else
                {
                    output.ValueData8Bytes[indexInArray].AsInt64 = ((Func<ClauseEvaluationContext, Int64>)compiledExpression)(ctx);
                    BitVector.Set(output.NotNulls, ordinal);
                    outputSize += sizeof(Int64);
                }
                break;

            case DbType.SByte:
                if (isNullable)
                {
                    var value = ((Func<ClauseEvaluationContext, UnboxableNullable<SByte>>)compiledExpression)(ctx);
                    if (value.HasValue)
                    {
                        output.ValueData8Bytes[indexInArray].AsSByte = value.Value;
                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize++;
                    }
                }
                else
                {
                    output.ValueData8Bytes[indexInArray].AsSByte = ((Func<ClauseEvaluationContext, SByte>)compiledExpression)(ctx);
                    BitVector.Set(output.NotNulls, ordinal);
                    outputSize++;
                }
                break;

            case DbType.Single:
                if (isNullable)
                {
                    var value = ((Func<ClauseEvaluationContext, UnboxableNullable<Single>>)compiledExpression)(ctx);
                    if (value.HasValue)
                    {
                        output.ValueData8Bytes[indexInArray].AsSingle = value.Value;
                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize += sizeof(Single);
                    }
                }
                else
                {
                    output.ValueData8Bytes[indexInArray].AsSingle = ((Func<ClauseEvaluationContext, Single>)compiledExpression)(ctx);
                    BitVector.Set(output.NotNulls, ordinal);
                    outputSize += sizeof(Single);
                }
                break;

            case DbType.UInt16:
                if (isNullable)
                {
                    var value = ((Func<ClauseEvaluationContext, UnboxableNullable<UInt16>>)compiledExpression)(ctx);
                    if (value.HasValue)
                    {
                        output.ValueData8Bytes[indexInArray].AsUInt16 = value.Value;
                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize += sizeof(UInt16);
                    }
                }
                else
                {
                    output.ValueData8Bytes[indexInArray].AsUInt16 = ((Func<ClauseEvaluationContext, UInt16>)compiledExpression)(ctx);
                    BitVector.Set(output.NotNulls, ordinal);
                    outputSize += sizeof(UInt16);
                }
                break;

            case DbType.UInt32:
                if (isNullable)
                {
                    var value = ((Func<ClauseEvaluationContext, UnboxableNullable<UInt32>>)compiledExpression)(ctx);
                    if (value.HasValue)
                    {
                        output.ValueData8Bytes[indexInArray].AsUInt32 = value.Value;
                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize += sizeof(UInt32);
                    }
                }
                else
                {
                    output.ValueData8Bytes[indexInArray].AsUInt32 = ((Func<ClauseEvaluationContext, UInt32>)compiledExpression)(ctx);
                    BitVector.Set(output.NotNulls, ordinal);
                    outputSize += sizeof(UInt32);
                }
                break;

            case DbType.UInt64:
                if (isNullable)
                {
                    var value = ((Func<ClauseEvaluationContext, UnboxableNullable<UInt64>>)compiledExpression)(ctx);
                    if (value.HasValue)
                    {
                        output.ValueData8Bytes[indexInArray].AsUInt64 = value.Value;
                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize += sizeof(UInt64);
                    }
                }
                else
                {
                    output.ValueData8Bytes[indexInArray].AsUInt64 = ((Func<ClauseEvaluationContext, UInt64>)compiledExpression)(ctx);
                    BitVector.Set(output.NotNulls, ordinal);
                    outputSize += sizeof(UInt64);
                }
                break;

            case DbType.DateTimeOffset:
                if (isNullable)
                {
                    var value = ((Func<ClauseEvaluationContext, UnboxableNullable<DateTimeOffset>>)compiledExpression)(ctx);
                    if (value.HasValue)
                    {
                        output.ValueData16Bytes[indexInArray].AsDateTimeOffset = value.Value;
                        BitVector.Set(output.NotNulls, ordinal);
                        outputSize += 12;
                    }
                }
                else
                {
                    output.ValueData16Bytes[indexInArray].AsDateTimeOffset = ((Func<ClauseEvaluationContext, DateTimeOffset>)compiledExpression)(ctx);
                    BitVector.Set(output.NotNulls, ordinal);
                    outputSize += 12;
                }
                break;

            default:
                throw new Exception("Invalid data type: " + output.FieldTypes[ordinal]);
        }
    }

    return outputSize;
}
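// A small self-contained illustration of the dispatch pattern ProduceOutputRow uses: the
// compiled clause is held as an untyped System.Delegate and downcast to the exact Func<,>
// implied by the column's declared type and nullability. UnboxableNullableSketch is a
// stand-in sketched here for the example; it is not the engine's UnboxableNullable type.
using System;

struct UnboxableNullableSketch<T> where T : struct
{
    public bool HasValue;
    public T Value;
}

static class CompiledExpressionDispatchExample
{
    static void Main()
    {
        // the engine keeps each compiled output clause as an untyped Delegate
        Delegate compiled = new Func<int, UnboxableNullableSketch<long>>(
            rownum => new UnboxableNullableSketch<long> { HasValue = rownum % 2 == 0, Value = rownum * 10L });

        // at output time it is downcast based on column metadata and invoked without boxing
        var typed = (Func<int, UnboxableNullableSketch<long>>)compiled;
        var result = typed(4);
        Console.WriteLine(result.HasValue ? result.Value.ToString() : "null"); // prints 40
    }
}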
public void EndExecution(RequestExecutionContext context, bool waitForProducerThread)
{
    if (m_disposed)
    {
        return;
    }

    var processors = m_activeProcessors;
    if (processors != null)
    {
        Task task;
        if (processors.TryRemove(context, out task) && waitForProducerThread && !task.IsCompleted)
        {
            task.Wait();
        }
    }
}
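// A hypothetical usage sketch for the execution lifecycle, assuming the caller owns both the
// engine and the context (the names engine and context are placeholders). BeginExecution
// registers and starts a long-running producer task; EndExecution unregisters it and
// optionally joins it, so the context can be safely reused or disposed afterwards.
//
//     engine.BeginExecution(context);
//     try
//     {
//         // ... consume completed buffers from the ring and stream them to the client ...
//     }
//     finally
//     {
//         // wait for the producer to stop touching the context before releasing it
//         engine.EndExecution(context, waitForProducerThread: true);
//     }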
public void FetchInternalEntityIdIntoChangeBuffer(DriverChangeBuffer changeBuffer, RequestExecutionContext context)
{
    changeBuffer.InternalEntityId = context.DriverOutputBuffer.InternalEntityId;
}
/// <summary>
/// Enumerates through data for a particular SELECT query.
/// Inside its MoveNext() implementation, it MUST populate the same instance of row data, pointed to by <see cref="RequestExecutionContext.DriverOutputBuffer"/>.
/// Yields a dummy true value; its value and data type are reserved for future use.
/// </summary>
public IDriverDataEnumerator GetData(RequestExecutionContext context)
{
    CheckInitialized();

    var data = m_dataContainer.RequireDocumentContainer(context.ParsedRequest.TargetEntity.DocumentType);

    if (context.ParsedRequest.IsBulk)
    {
        // engine expects us to use the bulk input data iterator to fetch data rows
        return data.GetBulkUpdateEnumerator(
            context.ParsedRequest.BaseDataset.BaseFields,
            context.DriverOutputBuffer,
            context.InputDataEnumerator);
    }

    if (context.ParsedRequest.BaseDataset.OrderClauseFields.Count == 0)
    {
        return data.GetUnorderedEnumerator(
            context.ParsedRequest.BaseDataset.BaseFields,
            context.ParsedRequest.BaseDataset.BaseFieldsMainCount,
            context.DriverOutputBuffer);
    }

    if (context.ParsedRequest.BaseDataset.OrderClauseFields.Count > 1)
    {
        throw new InvalidOperationException("Sorting by more than one field is not supported yet");
    }

    return data.GetOrderedEnumerator(
        context.ParsedRequest.BaseDataset.BaseFields,
        context.ParsedRequest.BaseDataset.BaseFieldsMainCount,
        context.DriverOutputBuffer,
        context.ParsedRequest.BaseDataset.OrderClauseFields[0].Item1,
        context.ParsedRequest.BaseDataset.OrderClauseFields[0].Item2);
}
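// A minimal sketch of the enumerator contract described above: MoveNext() overwrites one
// shared row buffer in place and yields a dummy value, so enumeration allocates no per-row
// objects. This is an illustration built on plain IEnumerator<bool>; it is not the engine's
// IDriverDataEnumerator, and the int[] row representation is an assumption for the example.
using System.Collections.Generic;

static class SharedBufferEnumeratorSketch
{
    public static IEnumerator<bool> Enumerate(int[] sharedRowBuffer, IEnumerable<int[]> storageRows)
    {
        foreach (var row in storageRows)
        {
            // overwrite the single shared buffer instead of yielding a new row object
            row.CopyTo(sharedRowBuffer, 0);
            yield return true; // dummy value, reserved for future use
        }
    }
}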
private void ReadRequest(RequestExecutionContext context)
{
    Serializer.MergeWithLengthPrefix(context.RequestMessage.Stream, context.Request, PrefixStyle.Base128);

    if (context.Request.PrepareOnly)
    {
        // prevent any ambiguities
        context.Request.ReturnDataset = false;
    }

    if (context.Request.HaveParameters)
    {
        context.ParsedRequest.HaveParametersDataInput = true;
        Serializer.MergeWithLengthPrefix(context.RequestMessage.Stream, context.RequestParameters, PrefixStyle.Base128);
        ReadParametersDataInput(context);
    }

    if (context.Request.HaveRequestBulk)
    {
        context.ParsedRequest.IsBulk = true;
        Serializer.MergeWithLengthPrefix(context.RequestMessage.Stream, context.RequestBulk, PrefixStyle.Base128);

        // after this point in the stream, bulk input data will be read by an instance of InputDataStreamEnumerator,
        // i.e. we're not yet done reading the request stream here
    }

    if (m_tracer.IsInfoEnabled)
    {
        var cmdText = context.Request.CommandText;
        if (string.IsNullOrEmpty(cmdText) && context.Request.HaveRequestBulk)
        {
            cmdText = string.Format(
                "Bulk {0} with {2} items on {1}",
                context.RequestBulk.DbStatementType,
                context.RequestBulk.EntityName,
                context.RequestBulk.InputItemsCount);
        }

        m_tracer.Info("Received command: " + cmdText);
    }

    // bring up the cache record
    var cacheKey = ParsedRequestCache.GetRequestHash(context.Request, context.RequestBulk, context.RequestParameters);
    var cacheInfo = m_parsedRequestCache.AddOrGetExisting(cacheKey, context.Request.HaveParameters);
    context.AttachCachedInfo(cacheInfo);

    // populate the cache record
    if (!cacheInfo.HaveRequestHeaders)
    {
        lock (cacheInfo)
        {
            cacheInfo.CheckIsError();
            if (!cacheInfo.HaveRequestHeaders)
            {
                cacheInfo.ReadRequestHeaders(context.Request, context.RequestParameters, context.RequestBulk, context.ParsedRequest);
            }
        }
    }
}
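// A self-contained sketch of the protobuf-net length-prefixed framing ReadRequest relies on:
// several messages share one stream, each preceded by a Base128 length prefix, so the reader
// can pull them out one by one and leave the stream positioned at the trailing bulk data.
// SerializeWithLengthPrefix/MergeWithLengthPrefix are real protobuf-net APIs; the Ping type
// is illustrative only.
using System;
using System.IO;
using ProtoBuf;

[ProtoContract]
class Ping
{
    [ProtoMember(1)]
    public string Text;
}

static class LengthPrefixExample
{
    static void Main()
    {
        using (var stream = new MemoryStream())
        {
            Serializer.SerializeWithLengthPrefix(stream, new Ping { Text = "headers" }, PrefixStyle.Base128);
            Serializer.SerializeWithLengthPrefix(stream, new Ping { Text = "parameters" }, PrefixStyle.Base128);

            stream.Position = 0;

            // MergeWithLengthPrefix populates an existing instance, as ReadRequest does with context.Request
            var first = Serializer.MergeWithLengthPrefix(stream, new Ping(), PrefixStyle.Base128);
            var second = Serializer.MergeWithLengthPrefix(stream, new Ping(), PrefixStyle.Base128);

            // the stream is now positioned past the framed messages, where bulk data would begin
            Console.WriteLine(first.Text + ", " + second.Text);
        }
    }
}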
private void ReadParametersDataInput(RequestExecutionContext context)
{
    var headers = context.RequestParameters;
    var parsed = context.ParsedRequest;

    var paramCount = headers.DataTypes.Length;
    if (headers.IsCollectionFlags.Length != BitVector.GetArrayLength(paramCount))
    {
        throw new Exception("BitVector for isCollection flags is broken");
    }

    parsed.Params.Names = headers.Names;
    parsed.Params.DataTypes = headers.DataTypes;
    parsed.Params.OrdinalToLocalOrdinal = new int[headers.DataTypes.Length];

    var collectionCount = 0;
    var valueCount = 0;
    for (var ordinal = 0; ordinal < paramCount; ordinal++)
    {
        if (BitVector.Get(headers.IsCollectionFlags, ordinal))
        {
            parsed.Params.OrdinalToLocalOrdinal[ordinal] = collectionCount;
            collectionCount++;
        }
        else
        {
            parsed.Params.OrdinalToLocalOrdinal[ordinal] = valueCount;
            valueCount++;
        }
    }

    // collections are stored separately
    if (collectionCount > 0)
    {
        parsed.Params.InputCollections = new object[collectionCount];
    }

    // single values are stored in an instance of DriverRowData
    if (valueCount > 0)
    {
        var fieldTypesForValues = new DbType[valueCount];
        for (var ordinal = 0; ordinal < headers.DataTypes.Length; ordinal++)
        {
            if (!BitVector.Get(headers.IsCollectionFlags, ordinal))
            {
                fieldTypesForValues[parsed.Params.OrdinalToLocalOrdinal[ordinal]] = headers.DataTypes[ordinal];
            }
        }

        parsed.Params.InputValues = new DriverRowData(fieldTypesForValues);
    }

    parsed.Bulk.Attach(context.RequestMessage.Stream);
    try
    {
        using (var reader = new BinaryReader(parsed.Bulk, Encoding.UTF8, true))
        {
            StringBuilder stringBuilder = null;

            var notnulls = new int[BitVector.GetArrayLength(paramCount)];
            BitVector.Read(notnulls, paramCount, reader);

            for (var ordinal = 0; ordinal < paramCount; ordinal++)
            {
                var iscollection = BitVector.Get(headers.IsCollectionFlags, ordinal);
                if (BitVector.Get(notnulls, ordinal))
                {
                    var dbType = headers.DataTypes[ordinal];
                    if (stringBuilder == null && RowData.DeriveSystemType(dbType) == typeof(string))
                    {
                        stringBuilder = new StringBuilder();
                    }

                    // we have more than one destination, each storing a subset of input values,
                    // so ordinals are different from a "flat" zero-to-paramCount enumeration
                    var localOrdinal = parsed.Params.OrdinalToLocalOrdinal[ordinal];
                    if (iscollection)
                    {
                        parsed.Params.InputCollections[localOrdinal] = ReadCollection(dbType, reader, stringBuilder);
                    }
                    else
                    {
                        BitVector.Set(parsed.Params.InputValues.NotNulls, localOrdinal);
                        ReadPrimitiveValue(parsed.Params.InputValues, localOrdinal, reader, stringBuilder);
                    }
                }
            }
        }

        // the client sets a stream end marker after parameters data, make sure we read it;
        // otherwise a subsequent bulk reader may fail
        if (-1 != parsed.Bulk.ReadByte())
        {
            throw new Exception("Did not find the end of parameters data block when expected");
        }
    }
    finally
    {
        parsed.Bulk.Detach();
    }
}
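// A self-contained sketch of the bit packing that ReadParametersDataInput relies on: one bit
// per parameter packed into an int[], mirroring the apparent semantics of the engine's
// BitVector helper (GetArrayLength/Get/Set). This is an illustration, not the engine's code.
static class BitVectorSketch
{
    // number of 32-bit ints needed to hold bitCount flags
    public static int GetArrayLength(int bitCount)
    {
        return (bitCount + 31) / 32;
    }

    public static bool Get(int[] bits, int index)
    {
        return (bits[index >> 5] & (1 << (index & 31))) != 0;
    }

    public static void Set(int[] bits, int index)
    {
        bits[index >> 5] |= 1 << (index & 31);
    }
}
// e.g. 40 parameters occupy GetArrayLength(40) == 2 ints; a request where parameters 3 and 35
// are collections sets one bit in each of the two ints, which is exactly what the
// headers.IsCollectionFlags.Length consistency check above validates.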
private bool WriteItems(
    RequestExecutionContext context, RequestExecutionBuffer buffer, IDriverDataEnumerator sourceEnumerator, bool havePendingDriverRow)
{
    // have to check for disposal here, because this method gets invoked in a loop
    if (m_disposed)
    {
        buffer.Error = new ObjectDisposedException("This data engine has been disposed");
        return false;
    }

    if (sourceEnumerator == null)
    {
        return false;
    }

    var stream = buffer.Stream;
    var writer = buffer.Writer;
    var cts = context.CancellationTokenSource;

    buffer.RowsOutput = 0;

    var hasPendingWrite = false;
    var mustReturnDataset = context.Request.ReturnDataset;
    var lastValidLength = stream.Length;
    var totalRowsProduced = context.TotalRowsProduced;
    var recordsAffected = context.RecordsAffected;
    var rowsOutputLocally = 0;

    var func = context.ParsedRequest.BaseDataset.Paging.Offset;
    var pagingOffset = ReferenceEquals(func, null) ? 0 : func(context.ParsedRequest.Params.InputValues);

    func = context.ParsedRequest.BaseDataset.Paging.PageSize;
    var pageSize = ReferenceEquals(func, null) ? Int32.MaxValue : func(context.ParsedRequest.Params.InputValues);

    // one row might not have fit into the previous buffer, let's write it now
    if (havePendingDriverRow)
    {
        try
        {
            // no need to apply paging and WHERE clause for the pending row: we applied them already;
            // also expect that a buffer can accommodate at least one row
            // - this requires that rows are never larger than RequestExecutionBuffer.MaxBytesPerBuffer
            ProduceOutputRow(context);
            context.OutputDataBuffer.Write(writer);

            rowsOutputLocally++;
            lastValidLength = stream.Length;
        }
        catch (Exception e)
        {
            throw new Exception("Internal buffer may be too small to fit even a single data row", e);
        }
    }

    // now let's deal with the remaining items in the enumerator
    while (lastValidLength < RequestExecutionBuffer.MaxBytesPerBuffer && !cts.IsCancellationRequested)
    {
        if (recordsAffected >= pageSize || !sourceEnumerator.MoveNext())
        {
            // enough rows accumulated, or no more rows from the driver: halt
            break;
        }

        // if the record satisfies WHERE criteria, it gets counted into the pre-paging total
        if (ApplyWhereClause(context))
        {
            var isAccumulating = totalRowsProduced >= pagingOffset;
            totalRowsProduced++;

            if (isAccumulating)
            {
                // once WHERE has been evaluated, read all remaining fields for this row into the buffer
                sourceEnumerator.FetchAdditionalFields();

                // output row number is needed for the "rownumoutput()" Pql function
                context.ClauseEvaluationContext.RowNumberInOutput = recordsAffected;

                // increment the counter BEFORE producing and writing:
                // even if we fail to write into the current buffer, we'll write into the next one
                recordsAffected++;

                // this will be false when clients do ExecuteNonQuery or Prepare
                if (mustReturnDataset)
                {
                    // produce SELECT (output scheme) from FROM (raw data from the storage driver)
                    var estimatedSize = ProduceOutputRow(context);

                    if (lastValidLength + estimatedSize > RequestExecutionBuffer.MaxBytesPerBuffer)
                    {
                        // store the pending write and return
                        hasPendingWrite = true;
                        break;
                    }

                    // MemoryStream would throw NotSupportedException when trying to expand beyond its fixed buffer size;
                    // this should never happen (see the check above)
                    context.OutputDataBuffer.Write(writer);

                    // this counter gets incremented AFTER writing,
                    // because it indicates how many rows have in fact been put into the current block
                    rowsOutputLocally++;
                }

                lastValidLength = stream.Length;
            }

            // row number is needed for the "rownum()" Pql function
            context.ClauseEvaluationContext.RowNumber++;
        }
    }

    buffer.RowsOutput = rowsOutputLocally;
    context.RecordsAffected = recordsAffected;
    context.TotalRowsProduced = totalRowsProduced;

    stream.Seek(0, SeekOrigin.Begin);
    return hasPendingWrite;
}
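// A self-contained sketch of the paging rule WriteItems applies: rows that pass WHERE are
// counted into the pre-paging total; output only starts once that total reaches the paging
// offset, and stops after pageSize rows have been emitted. All names are illustrative.
using System;
using System.Collections.Generic;
using System.Linq;

static class PagingSketch
{
    static IEnumerable<T> Page<T>(IEnumerable<T> matchingRows, int pagingOffset, int pageSize)
    {
        var totalRowsProduced = 0;
        var recordsAffected = 0;

        foreach (var row in matchingRows)
        {
            // rows before the offset are counted but not emitted, mirroring WriteItems
            var isAccumulating = totalRowsProduced >= pagingOffset;
            totalRowsProduced++;

            if (isAccumulating)
            {
                recordsAffected++;
                yield return row;

                if (recordsAffected >= pageSize)
                {
                    yield break; // page is full
                }
            }
        }
    }

    static void Main()
    {
        // prints rows 20..29 of the filtered result set
        Console.WriteLine(string.Join(",", Page(Enumerable.Range(0, 100), pagingOffset: 20, pageSize: 10)));
    }
}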
/// <summary>
/// Clients can supply a stream of data with bulk requests.
/// This input stream may contain values for any fields, but its usage is determined by the request type.
/// For SELECT and DELETE bulk requests, the engine will only use values of the primary key field.
/// For INSERT and UPDATE bulk requests, the engine will use both PK values and other field values.
/// However, the iterator will read and parse ALL values in the input stream, regardless of which values are used.
/// </summary>
private IDriverDataEnumerator CreateInputDataEnumerator(RequestExecutionContext context)
{
    var types = new DbType[context.ParsedRequest.Modify.InsertUpdateSetClauses.Count];
    for (var i = 0; i < types.Length; i++)
    {
        var field = context.ParsedRequest.Modify.ModifiedFields[i];
        types[i] = field.DbType;
    }

    context.ParsedRequest.Bulk.Attach(context.RequestMessage.Stream);

    return new InputDataStreamEnumerator(
        context.RequestBulk.InputItemsCount, types, context.ParsedRequest.Bulk, context.ClauseEvaluationContext.InputRow);
}
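// A minimal self-contained sketch of the pattern InputDataStreamEnumerator follows: read a
// known count of items from a stream, decode each into one reusable row object, and expose
// them through enumerator semantics. The long[] row and fixed-width decoding are assumptions
// for the example; the engine's actual wire format is defined by InputDataStreamEnumerator.
using System.Collections.Generic;
using System.IO;

static class InputStreamEnumeratorSketch
{
    public static IEnumerator<long[]> Enumerate(Stream input, int inputItemsCount, long[] reusableRow)
    {
        using (var reader = new BinaryReader(input))
        {
            for (var i = 0; i < inputItemsCount; i++)
            {
                for (var f = 0; f < reusableRow.Length; f++)
                {
                    // decode one field of the current item into the shared row
                    reusableRow[f] = reader.ReadInt64();
                }

                yield return reusableRow; // same instance every time, mirroring the engine's buffer reuse
            }
        }
    }
}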