Code example #1
File: DataEngine.cs Project: adrobyazko-softheme/PQL
        private void ExecuteDeleteStatement(RequestExecutionContext context, RequestExecutionBuffer buffer, IDriverDataEnumerator sourceEnumerator)
        {
            if (sourceEnumerator == null)
            {
                return;
            }

            buffer.RowsOutput = 0;

            var cts = context.CancellationTokenSource;

            context.ClauseEvaluationContext.RowNumber         = 0;
            context.ClauseEvaluationContext.RowNumberInOutput = context.ClauseEvaluationContext.RowNumber;
            var changeBuffer = context.ClauseEvaluationContext.ChangeBuffer;

            var changeset = m_storageDriver.CreateChangeset(changeBuffer, context.ParsedRequest.IsBulk);

            try
            {
                changeBuffer.ChangeType = DriverChangeType.Delete;

                while (!cts.IsCancellationRequested && sourceEnumerator.MoveNext())
                {
                    // if record satisfies WHERE criteria, hand its internal ID to the driver for deletion
                    if (ApplyWhereClause(context))
                    {
                        // load the internal entity ID; the driver needs it to identify the record to delete
                        sourceEnumerator.FetchAdditionalFields();
                        sourceEnumerator.FetchInternalEntityIdIntoChangeBuffer(changeBuffer, context);

                        m_storageDriver.AddChange(changeset);
                    }

                    // row number is needed for "rownum()" Pql function
                    context.ClauseEvaluationContext.RowNumber++;
                    // output row number is needed for "rownumoutput()" Pql function
                    context.ClauseEvaluationContext.RowNumberInOutput = context.ClauseEvaluationContext.RowNumber;
                }

                if (!cts.IsCancellationRequested)
                {
                    context.RecordsAffected = m_storageDriver.Apply(changeset);
                }
            }
            catch
            {
                m_storageDriver.Discard(changeset);
                throw;
            }
        }
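
All three statement executors on this page follow the same changeset lifecycle: create a changeset, stage one change per row that passes the WHERE clause, then apply it on success or discard it on failure. The sketch below spells out the contract implied by those calls; the interface name and generic parameters are assumptions inferred from usage, not the project's actual declarations.

// Sketch only: the storage-driver changeset contract implied by the calls above.
// The interface name and generic parameters are inferred from usage, not taken
// from the project's actual declarations.
public interface IChangesetDriverSketch<TChangeBuffer, TChangeset>
{
    // start a new changeset; "isBulk" mirrors context.ParsedRequest.IsBulk above
    TChangeset CreateChangeset(TChangeBuffer changeBuffer, bool isBulk);

    // stage one change currently described by the change buffer
    void AddChange(TChangeset changeset);

    // commit all staged changes and return the number of records affected
    int Apply(TChangeset changeset);

    // drop all staged changes (used in the catch block above)
    void Discard(TChangeset changeset);
}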
Code example #2
        public override bool MoveNext()
        {
            var bmpList = DataContainer.ValidDocumentsBitmap;

            // scroll forward on input data until another matching document is found
            while (m_inputEnumerator.MoveNext())
            {
                var entityId = m_inputEnumerator.Current.InternalEntityId;
                // document must exist and be valid (not deleted)
                if (DataContainer.DocumentIdToIndex.TryGetValueInt32(entityId, ref Position))
                {
                    if (bmpList.SafeGet(Position))
                    {
                        HaveData = true;
                        return true;
                    }
                }
            }

            Position = -1;
            HaveData = false;
            return false;
        }
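
This MoveNext is a filtering cursor: it advances an inner enumerator until it finds a document that both has an index in DocumentIdToIndex and is still marked valid in ValidDocumentsBitmap. A minimal, self-contained version of the same pattern, with made-up types, might look like this:

using System;
using System.Collections.Generic;

// Sketch only: the "skip invalid entries" MoveNext pattern in isolation.
// FilteringCursor and its members are illustrative names, not part of the project.
public sealed class FilteringCursor<T>
{
    private readonly IEnumerator<T> m_inner;
    private readonly Func<T, bool> m_isValid;

    public T Current { get; private set; }
    public bool HaveData { get; private set; }

    public FilteringCursor(IEnumerable<T> source, Func<T, bool> isValid)
    {
        m_inner = source.GetEnumerator();
        m_isValid = isValid;
    }

    public bool MoveNext()
    {
        // scroll forward on input data until another matching element is found
        while (m_inner.MoveNext())
        {
            if (m_isValid(m_inner.Current))
            {
                Current = m_inner.Current;
                HaveData = true;
                return true;
            }
        }

        HaveData = false;
        return false;
    }
}

Driving such a cursor is a plain while (cursor.MoveNext()) loop, which is exactly how the statement executors above consume sourceEnumerator.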
Code example #3
        private bool WriteItems(
            RequestExecutionContext context, RequestExecutionBuffer buffer, IDriverDataEnumerator sourceEnumerator, bool havePendingDriverRow)
        {
            // have to check for disposal here, because this method gets invoked in a loop
            if (m_disposed)
            {
                buffer.Error = new ObjectDisposedException("This data engine has been disposed");
                return false;
            }

            if (sourceEnumerator == null)
            {
                return false;
            }

            var stream = buffer.Stream;
            var writer = buffer.Writer;
            var cts = context.CancellationTokenSource;

            buffer.RowsOutput = 0;

            var hasPendingWrite = false;
            var mustReturnDataset = context.Request.ReturnDataset;
            var lastValidLength = stream.Length;
            var totalRowsProduced = context.TotalRowsProduced;
            var recordsAffected = context.RecordsAffected;
            var rowsOutputLocally = 0;

            var func = context.ParsedRequest.BaseDataset.Paging.Offset;
            var pagingOffset = ReferenceEquals(func, null) ? 0 : func(context.ParsedRequest.Params.InputValues);

            func = context.ParsedRequest.BaseDataset.Paging.PageSize;
            var pageSize = ReferenceEquals(func, null) ? Int32.MaxValue : func(context.ParsedRequest.Params.InputValues);

            // one row might not have fit into previous buffer, let's write it now
            if (havePendingDriverRow)
            {
                try
                {
                    // no need to apply paging and where clause for pending row: we applied them already
                    // also expect that a buffer can accommodate at least one row
                    // - this requires that rows are never larger than RequestExecutionBuffer.MaxBytesPerBuffer
                    ProduceOutputRow(context);
                    context.OutputDataBuffer.Write(writer);

                    rowsOutputLocally++;
                    lastValidLength = stream.Length;
                }
                catch (Exception e)
                {
                    throw new Exception("Internal buffer may be too small to fit even a single data row", e);
                }
            }

            // now let's deal with remaining items in the enumerator
            while (lastValidLength < RequestExecutionBuffer.MaxBytesPerBuffer && !cts.IsCancellationRequested)
            {
                if (recordsAffected >= pageSize || !sourceEnumerator.MoveNext())
                {
                    // enough rows accumulated, or no more rows from driver: halt
                    break;
                }

                // if record satisfies WHERE criteria, it gets counted into pre-paging total
                if (ApplyWhereClause(context))
                {
                    var isAccumulating = totalRowsProduced >= pagingOffset;

                    totalRowsProduced++;

                    if (isAccumulating)
                    {
                        // once WHERE has been evaluated, read all remaining fields for this row into the buffer
                        sourceEnumerator.FetchAdditionalFields();

                        // output row number is needed for "rownumoutput()" Pql function
                        context.ClauseEvaluationContext.RowNumberInOutput = recordsAffected;

                        // increment counter BEFORE producing and writing
                        // even if we fail to write into current buffer, we'll write into next one
                        recordsAffected++;

                        // this will be false when clients do ExecuteNonQuery or Prepare
                        if (mustReturnDataset)
                        {
                            // produce SELECT (output scheme) from FROM (raw data from storage driver)
                            var estimatedSize = ProduceOutputRow(context);

                            if (lastValidLength + estimatedSize > RequestExecutionBuffer.MaxBytesPerBuffer)
                            {
                                // store pending write and return
                                hasPendingWrite = true;
                                break;
                            }

                            // MemoryStream will throw NotSupportedException when trying to expand beyond fixed buffer size
                            // this should never happen (see check above)
                            context.OutputDataBuffer.Write(writer);

                            // this counter gets incremented AFTER writing,
                            // because it indicates how many rows have in fact been put into current block
                            rowsOutputLocally++;
                        }

                        lastValidLength = stream.Length;
                    }

                    // row number is needed for "rownum()" Pql function
                    context.ClauseEvaluationContext.RowNumber++;
                }
            }

            buffer.RowsOutput = rowsOutputLocally;
            context.RecordsAffected = recordsAffected;
            context.TotalRowsProduced = totalRowsProduced;

            stream.Seek(0, SeekOrigin.Begin);
            return hasPendingWrite;
        }
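
The return value is the key to the buffering protocol: true means a row was produced but did not fit into the current block, so the caller must feed that same row into the next WriteItems call via havePendingDriverRow. The actual driver loop is not shown on this page; one plausible shape of it, with hypothetical FetchBuffer/SendBuffer helpers, is sketched below.

        // Sketch only: a plausible outer loop driving WriteItems; not the project's actual code.
        // FetchBuffer and SendBuffer are hypothetical helpers for renting and flushing blocks.
        private void PumpBlocks(RequestExecutionContext context, IDriverDataEnumerator sourceEnumerator)
        {
            var pendingRow = false;
            do
            {
                RequestExecutionBuffer block = FetchBuffer();  // hypothetical: empty block of MaxBytesPerBuffer bytes
                pendingRow = WriteItems(context, block, sourceEnumerator, pendingRow);
                SendBuffer(block);                             // hypothetical: flush the filled block to the client
            }
            while (pendingRow && !context.CancellationTokenSource.IsCancellationRequested);

            // note: the real engine likely also keeps looping while the source still has rows,
            // e.g. when a block fills to the size limit without leaving a pending row behind
        }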
Code example #4
File: DataEngine.cs Project: adrobyazko-softheme/PQL
        private void ExecuteInsertUpdateStatement(RequestExecutionContext context, RequestExecutionBuffer buffer, IDriverDataEnumerator sourceEnumerator, DriverChangeType changeType)
        {
            if (sourceEnumerator == null)
            {
                return;
            }

            buffer.RowsOutput = 0;

            var cts = context.CancellationTokenSource;

            var updates = context.ParsedRequest.Modify.UpdateAssignments;

            context.ClauseEvaluationContext.RowNumber = 0;
            context.ClauseEvaluationContext.RowNumberInOutput = context.ClauseEvaluationContext.RowNumber;
            var changeBuffer = context.ClauseEvaluationContext.ChangeBuffer;

            var changeset = m_storageDriver.CreateChangeset(changeBuffer, context.ParsedRequest.IsBulk);
            try
            {
                changeBuffer.ChangeType = changeType;

                while (!cts.IsCancellationRequested && sourceEnumerator.MoveNext())
                {
                    // if record satisfies WHERE criteria, compute updated values and give them to driver
                    if (ApplyWhereClause(context))
                    {
                        // make sure we have values for fields in SET expressions
                        sourceEnumerator.FetchAdditionalFields();

                        BitVector.SetAll(changeBuffer.Data.NotNulls, false);
                        for (var ordinal = 0; ordinal < updates.Count; ordinal++)
                        {
                            if (updates[ordinal].CompiledExpression != null)
                            {
                                updates[ordinal].CompiledExpression(context.ClauseEvaluationContext);
                            }
                        }

                        // this will either take internal entity id from current data row
                        // or from the computed change buffer data (for non-bulk inserts)
                        sourceEnumerator.FetchInternalEntityIdIntoChangeBuffer(changeBuffer, context);

                        m_storageDriver.AddChange(changeset);
                    }

                    // row number is needed for "rownum()" Pql function
                    context.ClauseEvaluationContext.RowNumber++;
                    // output row number is needed for "rownumoutput()" Pql function
                    context.ClauseEvaluationContext.RowNumberInOutput = context.ClauseEvaluationContext.RowNumber;
                }

                if (!cts.IsCancellationRequested)
                {
                    context.RecordsAffected = m_storageDriver.Apply(changeset);
                }
            }
            catch
            {
                m_storageDriver.Discard(changeset);
                throw;
            }
        }
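
For each matching row, the loop first clears the NotNulls bit vector (presumably so that only columns touched by a SET assignment end up marked as non-null) and then runs every pre-compiled SET assignment against the clause evaluation context. Stripped of the engine types, the compiled-assignment pattern looks roughly like this; the names are illustrative, not the project's:

using System;
using System.Collections.Generic;

// Sketch only: the "list of pre-compiled SET assignments" pattern, with made-up types.
// In the real code each CompiledExpression writes one column of the change buffer.
public sealed class UpdateAssignmentSketch<TContext>
{
    // may be null for assignments without a compiled body (the original code checks for null)
    public Action<TContext> CompiledExpression { get; set; }
}

public static class UpdateRunnerSketch
{
    public static void ApplyAssignments<TContext>(
        IReadOnlyList<UpdateAssignmentSketch<TContext>> updates, TContext evaluationContext)
    {
        for (var ordinal = 0; ordinal < updates.Count; ordinal++)
        {
            updates[ordinal].CompiledExpression?.Invoke(evaluationContext);
        }
    }
}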