private bool WriteItems(
            RequestExecutionContext context, RequestExecutionBuffer buffer, IDriverDataEnumerator sourceEnumerator, bool havePendingDriverRow)
        {
            // have to check for disposal here, because this method gets invoked in a loop
            if (m_disposed)
            {
                buffer.Error = new ObjectDisposedException("This data engine has been disposed");
                return false;
            }

            if (sourceEnumerator == null)
            {
                return false;
            }

            var stream = buffer.Stream;
            var writer = buffer.Writer;
            var cts = context.CancellationTokenSource;

            buffer.RowsOutput = 0;

            var hasPendingWrite = false;
            var mustReturnDataset = context.Request.ReturnDataset;
            var lastValidLength = stream.Length;
            var totalRowsProduced = context.TotalRowsProduced;
            var recordsAffected = context.RecordsAffected;
            var rowsOutputLocally = 0;

            var func = context.ParsedRequest.BaseDataset.Paging.Offset;
            var pagingOffset = ReferenceEquals(func, null) ? 0 : func(context.ParsedRequest.Params.InputValues);

            func = context.ParsedRequest.BaseDataset.Paging.PageSize;
            var pageSize = ReferenceEquals(func, null) ? Int32.MaxValue : func(context.ParsedRequest.Params.InputValues);

            // one row might not have fit into previous buffer, let's write it now
            if (havePendingDriverRow)
            {
                try
                {
                    // no need to apply paging and where clause for pending row: we applied them already
                    // also expect that a buffer can accommodate at least one row
                    // - this requires that rows are never larger than RequestExecutionBuffer.MaxBytesPerBuffer
                    ProduceOutputRow(context);
                    context.OutputDataBuffer.Write(writer);

                    rowsOutputLocally++;
                    lastValidLength = stream.Length;
                }
                catch (Exception e)
                {
                    throw new Exception("Internal buffer may be too small to fit even a single data row", e);
                }
            }

            // now let's deal with remaining items in the enumerator
            while (lastValidLength < RequestExecutionBuffer.MaxBytesPerBuffer && !cts.IsCancellationRequested)
            {
                if (recordsAffected >= pageSize || !sourceEnumerator.MoveNext())
                {
                    // enough rows accumulated, or no more rows from driver: halt
                    break;
                }

                // if record satisfies WHERE criteria, it gets counted into pre-paging total
                if (ApplyWhereClause(context))
                {
                    var isAccumulating = totalRowsProduced >= pagingOffset;

                    totalRowsProduced++;

                    if (isAccumulating)
                    {
                        // once WHERE has been evaluated, read all remaining fields for this row into the buffer
                        sourceEnumerator.FetchAdditionalFields();

                        // output row number is needed for "rownumoutput()" Pql function
                        context.ClauseEvaluationContext.RowNumberInOutput = recordsAffected;

                        // increment counter BEFORE producing and writing
                        // even if we fail to write into current buffer, we'll write into next one
                        recordsAffected++;

                        // this will be false when clients do ExecuteNonQuery or Prepare
                        if (mustReturnDataset)
                        {
                            // produce SELECT (output scheme) from FROM (raw data from storage driver)
                            var estimatedSize = ProduceOutputRow(context);

                            if (lastValidLength + estimatedSize > RequestExecutionBuffer.MaxBytesPerBuffer)
                            {
                                // store pending write and return
                                hasPendingWrite = true;
                                break;
                            }

                            // MemoryStream will throw NotSupportedException when trying to expand beyond fixed buffer size
                            // this should never happen (see check above)
                            context.OutputDataBuffer.Write(writer);

                            // this counter gets incremented AFTER writing,
                            // because it indicates how many rows have in fact been put into current block
                            rowsOutputLocally++;
                        }

                        lastValidLength = stream.Length;
                    }

                    // row number is needed for "rownum()" Pql function
                    context.ClauseEvaluationContext.RowNumber++;
                }
            }

            buffer.RowsOutput = rowsOutputLocally;
            context.RecordsAffected = recordsAffected;
            context.TotalRowsProduced = totalRowsProduced;

            stream.Seek(0, SeekOrigin.Begin);
            return hasPendingWrite;
        }
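
// A minimal, self-contained sketch (not part of the engine) of the paging and
// fixed-buffer pattern used by WriteItems above: rows that pass the filter are
// counted against the paging offset first, then accumulated until the page is
// full or the fixed-size buffer cannot hold the next row, which is handed back
// to the caller as a pending row for the next buffer.
// All names below (PagedWriterSketch, TryWritePage, etc.) are hypothetical.
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;

public static class PagedWriterSketch
{
    // Writes up to pageSize filtered rows into a fixed-capacity stream.
    // Returns the row that did not fit (to be written into the next buffer), or null.
    public static string TryWritePage(
        IEnumerator<string> source,
        Func<string, bool> where,
        int offset,
        int pageSize,
        MemoryStream fixedBuffer,
        ref int totalMatched,
        ref int rowsAccepted)
    {
        while (rowsAccepted < pageSize && source.MoveNext())
        {
            var row = source.Current;
            if (!where(row))
            {
                continue;
            }

            // rows matching WHERE are counted before paging is applied
            if (totalMatched++ < offset)
            {
                continue;
            }

            // count the row even if it does not fit; it will go into the next buffer
            rowsAccepted++;

            var bytes = Encoding.UTF8.GetBytes(row);
            if (fixedBuffer.Position + bytes.Length > fixedBuffer.Capacity)
            {
                // does not fit: hand the pending row back to the caller
                return row;
            }

            fixedBuffer.Write(bytes, 0, bytes.Length);
        }

        return null;
    }
}
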
        private void StartProduction(
            RequestExecutionContext context, RequestExecutionBuffer buffer, out IDriverDataEnumerator sourceEnumerator)
        {
            ReadRequest(context);

            context.AttachContainerDescriptor(m_containerDescriptor);

            if (!context.CacheInfo.HaveParsingResults)
            {
                lock (context.CacheInfo)
                {
                    context.CacheInfo.CheckIsError();
                    if (!context.CacheInfo.HaveParsingResults)
                    {
                        try
                        {
                            ParseRequest(context.Request, context.RequestBulk, context.CacheInfo.ParsedRequest, context.CancellationTokenSource.Token);
                            CompileClauses(context.ContainerDescriptor, context.CacheInfo);

                            Thread.MemoryBarrier();
                            context.CacheInfo.HaveParsingResults = true;
                        }
                        catch (Exception e)
                        {
                            // make sure that partially complete results do not become visible
                            context.CacheInfo.IsError(e);
                            throw;
                        }
                    }
                }
            }

            context.CacheInfo.WriteParsingResults(context.ParsedRequest);

            if (context.ParsedRequest.SpecialCommand.IsSpecialCommand)
            {
                sourceEnumerator = null;
                ExecuteSpecialCommandStatement(context, buffer);
                return;
            }

            // structure of output buffer depends on which fields client is asking for
            // therefore, we re-create and re-attach a driver output buffer for every request
            context.AttachDriverOutputBufferAndInputParameters(
                QueryParser.CreateDriverRowDataBuffer(context.ParsedRequest.BaseDataset.BaseFields),
                context.ParsedRequest);

            // this enumerator will yield our own driverOutputBuffer for every source record
            // e.g. the very same context.DriverOutputBuffer is going to be yielded N times from this enumerator
            if (context.ParsedRequest.StatementType == StatementType.Insert)
            {
                if (context.ParsedRequest.IsBulk)
                {
                    sourceEnumerator = CreateInputDataEnumerator(context);
                    //m_storageDriver.AllocateCapacityForDocumentType(context.ParsedRequest.TargetEntity.DocumentType, context.RequestBulk.InputItemsCount);
                }
                else
                {
                    sourceEnumerator = CreatePseudoEnumeratorForInsertValues(context);
                }
            }
            else
            {
                if (context.ParsedRequest.IsBulk)
                {
                    // for SELECT and DELETE, we only use PK values from the input enumerator
                    // for UPDATE, we use both PK values and other field values from input enumerator
                    context.AttachInputDataEnumerator(CreateInputDataEnumerator(context));
                }

                // driver returns set of rows related to given set of PK values
                // for a bulk request, sourceEnumerator will yield exactly one item for each item in input enumerator
                sourceEnumerator = m_storageDriver.GetData(context);
            }

            switch (context.ParsedRequest.StatementType)
            {
                case StatementType.Select:
                    {
                        context.AttachResponseHeaders(CreateResponseSchemeForSelect(context));
                        context.PrepareBuffersForSelect();
                        context.ResponseHeaders.RecordsAffected = 0;
                    }
                    break;
                case StatementType.Update:
                    {
                        context.AttachResponseHeaders(new DataResponse(0, "Update successful"));
                        context.PrepareBuffersForUpdate();
                        ExecuteInsertUpdateStatement(context, buffer, sourceEnumerator, DriverChangeType.Update);
                        context.ResponseHeaders.RecordsAffected = context.RecordsAffected;
                    }
                    break;
                case StatementType.Delete:
                    {
                        context.AttachResponseHeaders(new DataResponse(0, "Delete successful"));
                        context.PrepareBuffersForDelete();
                        ExecuteDeleteStatement(context, buffer, sourceEnumerator);
                        context.ResponseHeaders.RecordsAffected = context.RecordsAffected;
                    }
                    break;
                case StatementType.Insert:
                    {
                        context.AttachResponseHeaders(new DataResponse(0, "Insert successful"));
                        context.PrepareChangeBufferForInsert();
                        ExecuteInsertUpdateStatement(context, buffer, sourceEnumerator, DriverChangeType.Insert);
                        context.ResponseHeaders.RecordsAffected = context.RecordsAffected;
                    }
                    break;
                default:
                    throw new Exception("Invalid statement type: " + context.ParsedRequest.StatementType);
            }
        }
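
// A minimal, self-contained sketch (not part of the engine) of the
// double-checked caching pattern used in StartProduction above: parse once under
// a lock, publish the result with a memory barrier before setting the
// "have results" flag, and remember a failure so later callers fail fast.
// All names below (ParseCacheSketch, GetOrParse, etc.) are hypothetical.
using System;
using System.Threading;

public sealed class ParseCacheSketch
{
    private readonly object m_lock = new object();
    private volatile bool m_haveResults;
    private Exception m_error;
    private string m_parsed; // stands in for the real parsing results

    public string GetOrParse(string requestText, Func<string, string> parse)
    {
        if (!m_haveResults)
        {
            lock (m_lock)
            {
                if (m_error != null)
                {
                    // a previous attempt failed; surface that instead of retrying silently
                    throw new InvalidOperationException("Cached request is in error state", m_error);
                }

                if (!m_haveResults)
                {
                    try
                    {
                        m_parsed = parse(requestText);

                        // order the write of the parsed data before the flag
                        // that advertises it, mirroring the original code
                        Thread.MemoryBarrier();
                        m_haveResults = true;
                    }
                    catch (Exception e)
                    {
                        // partially complete results must not become visible
                        m_error = e;
                        throw;
                    }
                }
            }
        }

        return m_parsed;
    }
}
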
        private void ExecuteInsertUpdateStatement(RequestExecutionContext context, RequestExecutionBuffer buffer, IDriverDataEnumerator sourceEnumerator, DriverChangeType changeType)
        {
            if (sourceEnumerator == null)
            {
                return;
            }

            buffer.RowsOutput = 0;

            var cts = context.CancellationTokenSource;

            var updates = context.ParsedRequest.Modify.UpdateAssignments;

            context.ClauseEvaluationContext.RowNumber = 0;
            context.ClauseEvaluationContext.RowNumberInOutput = context.ClauseEvaluationContext.RowNumber;
            var changeBuffer = context.ClauseEvaluationContext.ChangeBuffer;

            var changeset = m_storageDriver.CreateChangeset(changeBuffer, context.ParsedRequest.IsBulk);
            try
            {
                changeBuffer.ChangeType = changeType;

                while (!cts.IsCancellationRequested && sourceEnumerator.MoveNext())
                {
                    // if record satisfies WHERE criteria, compute updated values and give them to driver
                    if (ApplyWhereClause(context))
                    {
                        // make sure we have values for fields in SET expressions
                        sourceEnumerator.FetchAdditionalFields();

                        BitVector.SetAll(changeBuffer.Data.NotNulls, false);
                        for (var ordinal = 0; ordinal < updates.Count; ordinal++)
                        {
                            if (updates[ordinal].CompiledExpression != null)
                            {
                                updates[ordinal].CompiledExpression(context.ClauseEvaluationContext);
                            }
                        }

                        // this will either take internal entity id from current data row
                        // or from the computed change buffer data (for non-bulk inserts)
                        sourceEnumerator.FetchInternalEntityIdIntoChangeBuffer(changeBuffer, context);

                        m_storageDriver.AddChange(changeset);
                    }

                    // row number is needed for "rownum()" Pql function
                    context.ClauseEvaluationContext.RowNumber++;
                    // output row number is needed for "rownumoutput()" Pql function
                    context.ClauseEvaluationContext.RowNumberInOutput = context.ClauseEvaluationContext.RowNumber;
                }

                if (!cts.IsCancellationRequested)
                {
                    context.RecordsAffected = m_storageDriver.Apply(changeset);
                }
            }
            catch
            {
                m_storageDriver.Discard(changeset);
                throw;
            }
        }
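
// A minimal, self-contained sketch (not part of the engine) of the changeset
// lifecycle used by ExecuteInsertUpdateStatement above: open a changeset,
// add one change per matching row, apply everything once at the end, and
// discard the changeset if anything throws along the way.
// The IChangesetDriverSketch interface is hypothetical; the real storage
// driver exposes a richer API.
using System;
using System.Collections.Generic;

public interface IChangesetDriverSketch
{
    object CreateChangeset();
    void AddChange(object changeset, int rowKey);
    int Apply(object changeset);
    void Discard(object changeset);
}

public static class ChangesetSketch
{
    public static int ApplyToMatchingRows(
        IChangesetDriverSketch driver, IEnumerable<int> rowKeys, Func<int, bool> where)
    {
        var changeset = driver.CreateChangeset();
        try
        {
            foreach (var key in rowKeys)
            {
                if (where(key))
                {
                    driver.AddChange(changeset, key);
                }
            }

            // nothing is persisted until Apply; returns the number of records affected
            return driver.Apply(changeset);
        }
        catch
        {
            // failed mid-way: make sure no partial changes survive
            driver.Discard(changeset);
            throw;
        }
    }
}
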
        private void ExecuteSpecialCommandStatement(RequestExecutionContext context, RequestExecutionBuffer buffer)
        {
            switch (context.ParsedRequest.SpecialCommand.CommandType)
            {
                case ParsedRequest.SpecialCommandData.SpecialCommandType.Defragment:
                    context.AttachResponseHeaders(new DataResponse(0, "Defragmentation completed"));
                    m_storageDriver.Compact(CompactionOptions.FullReindex);
                    break;
                default:
                    throw new ArgumentOutOfRangeException("context", context.ParsedRequest.SpecialCommand.CommandType, "Invalid special command");
            }
        }
        private void ExecuteDeleteStatement(RequestExecutionContext context, RequestExecutionBuffer buffer, IDriverDataEnumerator sourceEnumerator)
        {
            if (sourceEnumerator == null)
            {
                return;
            }

            buffer.RowsOutput = 0;

            var cts = context.CancellationTokenSource;

            context.ClauseEvaluationContext.RowNumber = 0;
            context.ClauseEvaluationContext.RowNumberInOutput = context.ClauseEvaluationContext.RowNumber;
            var changeBuffer = context.ClauseEvaluationContext.ChangeBuffer;

            var changeset = m_storageDriver.CreateChangeset(changeBuffer, context.ParsedRequest.IsBulk);
            try
            {
                changeBuffer.ChangeType = DriverChangeType.Delete;

                while (!cts.IsCancellationRequested && sourceEnumerator.MoveNext())
                {
                    // if record satisfies WHERE criteria, hand its key to the driver for deletion
                    if (ApplyWhereClause(context))
                    {
                        // load remaining fields and the internal entity ID so the driver can locate the row to delete
                        sourceEnumerator.FetchAdditionalFields();
                        sourceEnumerator.FetchInternalEntityIdIntoChangeBuffer(changeBuffer, context);

                        m_storageDriver.AddChange(changeset);
                    }

                    // row number is needed for "rownum()" Pql function
                    context.ClauseEvaluationContext.RowNumber++;
                    // output row number is needed for "rownumoutput()" Pql function
                    context.ClauseEvaluationContext.RowNumberInOutput = context.ClauseEvaluationContext.RowNumber;
                }

                if (!cts.IsCancellationRequested)
                {
                    context.RecordsAffected = m_storageDriver.Apply(changeset);
                }
            }
            catch
            {
                m_storageDriver.Discard(changeset);
                throw;
            }
        }
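
// A minimal, self-contained sketch (not part of the engine) of the cooperative
// cancellation pattern shared by the statement executors above: the loop polls
// the CancellationTokenSource between rows, and the final Apply step is skipped
// when the request has been cancelled. All names below are hypothetical.
using System;
using System.Collections.Generic;
using System.Threading;

public static class CancellableLoopSketch
{
    public static int ProcessRows(
        IEnumerator<int> rows, CancellationTokenSource cts, Action<int> processRow, Func<int> apply)
    {
        while (!cts.IsCancellationRequested && rows.MoveNext())
        {
            processRow(rows.Current);
        }

        // a cancelled request must not commit accumulated changes
        return cts.IsCancellationRequested ? 0 : apply();
    }
}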