private void BuildUpdateAction(MergeSpecification specification)
{
    var clause = new MergeActionClause();
    var expression = (clause.SearchCondition = new BooleanParenthesisExpression()) as BooleanParenthesisExpression;
    var isNulls = BuildNullIfStatements();

    if (isNulls.Count > 1)
    {
        var expressions = CreateExpressionTreeForUpdateSearch(isNulls);

        // Save the first expression
        expression.Expression = expressions.First();
        clause.SearchCondition = expression;
    }
    else
    {
        clause.SearchCondition = isNulls[0];
    }

    clause.Condition = MergeCondition.Matched;
    clause.Action = CreateUpdateSetActions(clause);
    specification.ActionClauses.Add(clause);
}
private void BuildInsertAction(MergeSpecification specification)
{
    var action = new InsertMergeAction();
    var insertSource = action.Source = new ValuesInsertSource();
    var row = new RowValue();

    foreach (var column in _columnDescriptors)
    {
        var colRef = new ColumnReferenceExpression();
        colRef.ColumnType = ColumnType.Regular;
        colRef.MultiPartIdentifier = new MultiPartIdentifier().Create(column.Name);
        action.Columns.Add(colRef);

        colRef = new ColumnReferenceExpression();
        colRef.ColumnType = ColumnType.Regular;
        colRef.MultiPartIdentifier = new MultiPartIdentifier().Create(new Identifier { Value = MergeIdentifierStrings.SourceName }, column.Name);
        row.ColumnValues.Add(colRef);
    }

    insertSource.RowValues.Add(row);
    specification.ActionClauses.Add(new MergeActionClause { Action = action, Condition = MergeCondition.NotMatchedByTarget });
}
public override MergeSpecification FindMerges(MergeTrigger? mergeTrigger, SegmentInfos segmentInfos)
{
    MergeSpecification mergeSpec = null;
    //System.out.println("MRMP: findMerges sis=" + segmentInfos);
    int numSegments = segmentInfos.Size();

    IList<SegmentCommitInfo> segments = new List<SegmentCommitInfo>();
    ICollection<SegmentCommitInfo> merging = Writer.Get().MergingSegments;

    foreach (SegmentCommitInfo sipc in segmentInfos.Segments)
    {
        if (!merging.Contains(sipc))
        {
            segments.Add(sipc);
        }
    }

    numSegments = segments.Count;

    if (numSegments > 1 && (numSegments > 30 || Random.Next(5) == 3))
    {
        segments = CollectionsHelper.Shuffle(segments);

        // TODO: sometimes make more than 1 merge?
        mergeSpec = new MergeSpecification();
        int segsToMerge = TestUtil.NextInt(Random, 1, numSegments);
        mergeSpec.Add(new OneMerge(segments.SubList(0, segsToMerge)));
    }

    return mergeSpec;
}
public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
{
    MergeSpecification mergeSpec = null;
    //System.out.println("MRMP: findMerges sis=" + segmentInfos);
    int numSegments /* = segmentInfos.Count*/; // LUCENENET: IDE0059: Remove unnecessary value assignment

    IList<SegmentCommitInfo> segments = new List<SegmentCommitInfo>();
    ICollection<SegmentCommitInfo> merging = base.m_writer.Get().MergingSegments;

    foreach (SegmentCommitInfo sipc in segmentInfos.Segments)
    {
        if (!merging.Contains(sipc))
        {
            segments.Add(sipc);
        }
    }

    numSegments = segments.Count;

    if (numSegments > 1 && (numSegments > 30 || random.Next(5) == 3))
    {
        segments.Shuffle(random);

        // TODO: sometimes make more than 1 merge?
        mergeSpec = new MergeSpecification();
        int segsToMerge = TestUtil.NextInt32(random, 1, numSegments);
        mergeSpec.Add(new OneMerge(segments.SubList(0, segsToMerge)));
    }

    return mergeSpec;
}
private void BuildDeleteAction(MergeSpecification specification)
{
    var action = new DeleteMergeAction();
    specification.ActionClauses.Add(new MergeActionClause { Action = action, Condition = MergeCondition.NotMatchedBySource });
}
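// The Build*Action methods above only assemble the AST; rendering it shows the
// generated T-SQL. A minimal sketch, assuming these types come from
// Microsoft.SqlServer.TransactSql.ScriptDom (the MergeStatement wrapper and
// generator options here are illustrative, not taken from the snippets):
using Microsoft.SqlServer.TransactSql.ScriptDom;

public static class MergeScriptRenderer
{
    public static string Render(MergeSpecification specification)
    {
        // Wrap the populated specification in a statement so the generator
        // can emit a complete MERGE statement.
        var statement = new MergeStatement { MergeSpecification = specification };
        var generator = new Sql120ScriptGenerator(
            new SqlScriptGeneratorOptions { IncludeSemicolons = true });
        generator.GenerateScript(statement, out string script);
        return script;
    }
}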
public override MergeSpecification FindForcedDeletesMerges(SegmentInfos infos)
{
    if (Verbose())
    {
        Message("findForcedDeletesMerges infos=" + Writer.Get().SegString(infos.Segments) + " forceMergeDeletesPctAllowed=" + ForceMergeDeletesPctAllowed_Renamed);
    }

    List<SegmentCommitInfo> eligible = new List<SegmentCommitInfo>();
    ICollection<SegmentCommitInfo> merging = Writer.Get().MergingSegments;

    foreach (SegmentCommitInfo info in infos.Segments)
    {
        double pctDeletes = 100.0 * ((double)Writer.Get().NumDeletedDocs(info)) / info.Info.DocCount;
        if (pctDeletes > ForceMergeDeletesPctAllowed_Renamed && !merging.Contains(info))
        {
            eligible.Add(info);
        }
    }

    if (eligible.Count == 0)
    {
        return null;
    }

    eligible.Sort(new SegmentByteSizeDescending(this));

    if (Verbose())
    {
        Message("eligible=" + eligible);
    }

    int start = 0;
    MergeSpecification spec = null;

    while (start < eligible.Count)
    {
        // Don't enforce max merged size here: app is explicitly
        // calling forceMergeDeletes, and knows this may take a
        // long time / produce big segments (like forceMerge):
        int end = Math.Min(start + MaxMergeAtOnceExplicit_Renamed, eligible.Count);
        if (spec == null)
        {
            spec = new MergeSpecification();
        }

        OneMerge merge = new OneMerge(eligible.SubList(start, end));
        if (Verbose())
        {
            Message("add merge=" + Writer.Get().SegString(merge.Segments));
        }
        spec.Add(merge);
        start = end;
    }

    return spec;
}
private void SetInlineTableReference(MergeSpecification specification)
{
    var table = (specification.TableReference = new InlineDerivedTable()) as InlineDerivedTable;
    table.Alias = new Identifier { Value = MergeIdentifierStrings.SourceName };

    foreach (var col in _columnDescriptors)
    {
        table.Columns.Add(col.Name);
    }
}
private void BuildMultiKeySearchCondition(MergeSpecification specification)
{
    var comparisons = new List<BooleanComparisonExpression>();

    foreach (var column in _keyColumns)
    {
        var condition = new BooleanComparisonExpression();
        comparisons.Add(CreateSearchCondition(column, condition));
    }

    specification.SearchCondition = CreateExpressionTreeForMultiKeySearchConditon(comparisons).First();
}
private void BuildMultiKeySearchCondition(MergeSpecification specification)
{
    var comparisons = new List<BooleanComparisonExpression>();

    foreach (var column in _merge.Table.Columns.Where(p => p.IsKey))
    {
        var condition = new BooleanComparisonExpression();
        comparisons.Add(CreateSearchCondition(column.Name.GetName(), condition));
    }

    specification.SearchCondition = CreateExpressionTreeForMultiKeySearchConditon(comparisons).First();
}
public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
{
    MergeSpecification ms = new MergeSpecification();

    if (DoMerge)
    {
        OneMerge om = new OneMerge(segmentInfos.AsList().SubList(Start, Start + Length));
        ms.Add(om);
        DoMerge = false;
        return ms;
    }

    return null;
}
public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
{
    MergeSpecification ms = new MergeSpecification();

    if (doMerge)
    {
        OneMerge om = new OneMerge(segmentInfos.AsList().GetView(start, length)); // LUCENENET: Converted end index to length
        ms.Add(om);
        doMerge = false;
        return ms;
    }

    return null;
}
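// The two FindMerges variants above differ only in how they slice the segment
// list: the older port kept Java's SubList(fromIndex, toIndex), while the
// current LUCENENET code uses a (start, count) view. A hypothetical
// illustration, assuming the same SubList/GetView extension methods used in
// these snippets:
IList<SegmentCommitInfo> segments = segmentInfos.AsList(); // as in the methods above
var viaSubList = segments.SubList(2, 5);  // Java-style: exclusive end index -> elements 2, 3, 4
var viaGetView = segments.GetView(2, 3);  // LUCENENET-style: count, not end index -> elements 2, 3, 4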
private MergeSpecification SortedMergeSpecification(MergeSpecification specification)
{
    if (specification == null)
    {
        return null;
    }

    MergeSpecification sortingSpec = new SortingMergeSpecification(this);

    foreach (OneMerge merge in specification.Merges)
    {
        sortingSpec.Add(merge);
    }

    return sortingSpec;
}
/// <summary>
/// Finds merges necessary to expunge all deletes from the
/// index. We simply merge adjacent segments that have
/// deletes, up to mergeFactor at a time.
/// </summary>
public override MergeSpecification FindMergesToExpungeDeletes(SegmentInfos segmentInfos, IndexWriter writer)
{
    this.writer = writer;
    int numSegments = segmentInfos.Count;

    Message("findMergesToExpungeDeletes: " + numSegments + " segments");

    MergeSpecification spec = new MergeSpecification();
    int firstSegmentWithDeletions = -1;

    for (int i = 0; i < numSegments; i++)
    {
        SegmentInfo info = segmentInfos.Info(i);
        if (info.HasDeletions())
        {
            Message(" segment " + info.name + " has deletions");
            if (firstSegmentWithDeletions == -1)
            {
                firstSegmentWithDeletions = i;
            }
            else if (i - firstSegmentWithDeletions == mergeFactor)
            {
                // We've seen mergeFactor segments in a row with
                // deletions, so force a merge now:
                Message(" add merge " + firstSegmentWithDeletions + " to " + (i - 1) + " inclusive");
                spec.Add(new OneMerge(segmentInfos.Range(firstSegmentWithDeletions, i), useCompoundFile));
                firstSegmentWithDeletions = i;
            }
        }
        else if (firstSegmentWithDeletions != -1)
        {
            // End of a sequence of segments with deletions, so,
            // merge those past segments even if it's fewer than
            // mergeFactor segments
            Message(" add merge " + firstSegmentWithDeletions + " to " + (i - 1) + " inclusive");
            spec.Add(new OneMerge(segmentInfos.Range(firstSegmentWithDeletions, i), useCompoundFile));
            firstSegmentWithDeletions = -1;
        }
    }

    if (firstSegmentWithDeletions != -1)
    {
        Message(" add merge " + firstSegmentWithDeletions + " to " + (numSegments - 1) + " inclusive");
        spec.Add(new OneMerge(segmentInfos.Range(firstSegmentWithDeletions, numSegments), useCompoundFile));
    }

    return spec;
}
public static IList<FieldPairReference> GetFieldPairReferences(
    this MergeSpecification mergeSpecification,
    ILogger logger,
    SchemaFile file
)
{
    // TODO : find out why I have missing references..
    var newReferences = mergeSpecification
        .GetSchemaObjectReferences(logger, file)
        .ToList();

    using (new StatementContext(file.FileContext, newReferences))
    {
        var searchConditionPairs = mergeSpecification
            .SearchCondition
            .GetFieldPairs(logger, file);

        var actionClausesReferences = mergeSpecification
            .ActionClauses
            .SelectMany(actionClause =>
            {
                var innerSearchConditionPairs = actionClause
                    .SearchCondition
                    ?.GetFieldPairs(logger, file)
                    ?? new List<FieldPairReference>();

                var actionPairs = actionClause
                    .Action
                    .GetFieldPairReferences(logger, file);

                return innerSearchConditionPairs
                    .Concat(actionPairs)
                    .ToList();
            })
            .ToList();

        var outputIntoPairs = mergeSpecification
            .OutputIntoClause
            ?.GetFieldPairs(logger, file)
            ?? new List<FieldPairReference>();

        return searchConditionPairs
            .Concat(actionClausesReferences)
            .Concat(outputIntoPairs)
            .ToList();
    }
}
public static EngineResult Evaluate(DataModificationSpecification dml, Scope scope)
{
    IOutputSink sink;

    // TODO: support scalar expressions in TableOutputSink, not just column names
    //       how to handle INSERTED. and DELETED. aliases?
    if (dml.OutputClause != null || dml.OutputIntoClause != null)
    {
        sink = new TableOutputSink(
            (dml.OutputClause?.SelectColumns ?? dml.OutputIntoClause?.SelectColumns)?
            .Select(s => new Column
            {
                Name = ((ColumnReferenceExpression)((SelectScalarExpression)s).Expression)
                    .MultiPartIdentifier.Identifiers.Select(x => x.Value).ToArray(),
                Type = DbType.AnsiString
            }).ToList());
    }
    else
    {
        sink = new NullOutputSink();
    }

    var result = dml switch
    {
        InsertSpecification insert => Evaluate(insert, sink, scope),
        MergeSpecification merge => Evaluate(merge, sink, scope),
        DeleteSpecification delete => Evaluate(delete, sink, scope),
        UpdateSpecification update => Evaluate(update, sink, scope),
        _ => throw FeatureNotSupportedException.Subtype(dml)
    };

    if (dml.OutputIntoClause != null)
    {
        var (table, scope2) = Evaluate(dml.OutputIntoClause.IntoTable, null, scope);
        Evaluate(
            table,
            dml.OutputIntoClause.IntoTableColumns,
            ((TableOutputSink)sink).Output,
            new NullOutputSink(),
            scope2);
    }

    return dml.OutputClause != null
        ? new EngineResult(((TableOutputSink)sink).Output)
        : result;
}
public void MergeExistingSample()
{
    var api = new NapierClient();

    MergeSpecification mergeResources = new MergeSpecification();
    var winner = "<WINNERID>";
    var loser1 = "<LOSERID1>";
    var loser2 = "<LOSERID2>";
    mergeResources.WinnerId = winner;
    mergeResources.IdsToMerge = new[] { loser1, loser2 };

    var mergeResponse = api.MergeExisting("Contact", new[] { mergeResources }, null);

    if (mergeResponse.BatchCompleted)
    {
        foreach (var result in mergeResponse.Results)
        {
            if (result.OperationSucceeded)
            {
                Console.WriteLine("The following resources attempted to merge into {0}:", result.WinnerId);
                foreach (var operationResult in result.Results)
                {
                    Console.Write("Id: {0}. Result: ", operationResult.Id);
                    Console.WriteLine(operationResult.OperationSucceeded ? "Success" : "Failed");
                }
            }
            else
            {
                Console.WriteLine("The specified resource could not be merged.");
                Console.WriteLine(result.ErrorString + " " + result.ErrorMessage);
            }
        }
    }
    else
    {
        Console.WriteLine("The operation could not be performed.");
        Console.WriteLine(mergeResponse.ErrorMessage);
    }
}
public override MergeSpecification FindForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount, IDictionary<SegmentCommitInfo, bool?> segmentsToMerge)
{
    IList<SegmentCommitInfo> eligibleSegments = new List<SegmentCommitInfo>();
    foreach (SegmentCommitInfo info in segmentInfos.Segments)
    {
        if (segmentsToMerge.ContainsKey(info))
        {
            eligibleSegments.Add(info);
        }
    }

    //System.out.println("MRMP: findMerges sis=" + segmentInfos + " eligible=" + eligibleSegments);
    MergeSpecification mergeSpec = null;
    if (eligibleSegments.Count > 1 || (eligibleSegments.Count == 1 && eligibleSegments[0].HasDeletions))
    {
        mergeSpec = new MergeSpecification();
        // Already shuffled having come out of a set but
        // shuffle again for good measure:
        eligibleSegments.Shuffle(random);
        int upto = 0;
        while (upto < eligibleSegments.Count)
        {
            int max = Math.Min(10, eligibleSegments.Count - upto);
            int inc = max <= 2 ? max : TestUtil.NextInt32(random, 2, max);
            mergeSpec.Add(new OneMerge(eligibleSegments.SubList(upto, upto + inc)));
            upto += inc;
        }
    }

    if (mergeSpec != null)
    {
        foreach (OneMerge merge in mergeSpec.Merges)
        {
            foreach (SegmentCommitInfo info in merge.Segments)
            {
                if (Debugging.AssertsEnabled)
                {
                    Debugging.Assert(segmentsToMerge.ContainsKey(info));
                }
            }
        }
    }

    return mergeSpec;
}
private void BuildActions(MergeSpecification specification)
{
    if (_merge.Option.HasInsert)
    {
        BuildInsertAction(specification);
    }

    if (_merge.Option.HasUpdate)
    {
        BuildUpdateAction(specification);
    }

    if (_merge.Option.HasDelete)
    {
        BuildDeleteAction(specification);
    }
}
public void SetInlineTableData(MergeSpecification specification)
{
    var table = specification.TableReference as InlineDerivedTable;
    if (null == table)
    {
        throw new NotImplementedException("only support merges from inline table reference");
    }

    table.RowValues.Clear();

    foreach (DataRow row in _merge.Data.Rows)
    {
        if (row.RowState == DataRowState.Deleted)
        {
            continue;
        }

        if (row.HasErrors)
        {
            continue;
        }

        var rowValue = new RowValue();
        var colIndex = 0;

        foreach (var col in _merge.Table.Columns)
        {
            var value = row[col.Name.GetName()];
            if (value == DBNull.Value)
            {
                rowValue.ColumnValues.Add(new NullLiteral());
            }
            else
            {
                var actualColumn = _merge.Data.Columns[col.Name.GetName()];
                rowValue.ColumnValues.Add(GetColumnValue(GetStringRepresentation(value, actualColumn), col.DataType, col.IsNText));
            }

            colIndex++;
        }

        table.RowValues.Add(rowValue);
    }
}
/// <summary>
/// Returns the merges necessary to merge the index, taking the max merge
/// size or max merge docs into consideration. This method attempts to respect
/// the <paramref name="maxNumSegments"/> parameter, however it might be, due to size
/// constraints, that more than that number of segments will remain in the
/// index. Also, this method does not guarantee that exactly
/// <paramref name="maxNumSegments"/> will remain, but &lt;= that number.
/// </summary>
private MergeSpecification FindForcedMergesSizeLimit(SegmentInfos infos, int maxNumSegments, int last)
{
    MergeSpecification spec = new MergeSpecification();
    IList<SegmentCommitInfo> segments = infos.AsList();

    int start = last - 1;
    while (start >= 0)
    {
        SegmentCommitInfo info = infos.Info(start);
        if (Size(info) > m_maxMergeSizeForForcedMerge || SizeDocs(info) > m_maxMergeDocs)
        {
            if (IsVerbose)
            {
                Message("findForcedMergesSizeLimit: skip segment=" + info + ": size is > maxMergeSize (" + m_maxMergeSizeForForcedMerge + ") or sizeDocs is > maxMergeDocs (" + m_maxMergeDocs + ")");
            }

            // need to skip that segment + add a merge for the 'right' segments,
            // unless there is only 1 which is merged.
            if (last - start - 1 > 1 || (start != last - 1 && !IsMerged(infos, infos.Info(start + 1))))
            {
                // there is more than 1 segment to the right of
                // this one, or a mergeable single segment.
                spec.Add(new OneMerge(segments.SubList(start + 1, last)));
            }
            last = start;
        }
        else if (last - start == m_mergeFactor)
        {
            // mergeFactor eligible segments were found, add them as a merge.
            spec.Add(new OneMerge(segments.SubList(start, last)));
            last = start;
        }
        --start;
    }

    // Add any left-over segments, unless there is just 1
    // already fully merged
    if (last > 0 && (++start + 1 < last || !IsMerged(infos, infos.Info(start))))
    {
        spec.Add(new OneMerge(segments.SubList(start, last)));
    }

    return spec.Merges.Count == 0 ? null : spec;
}
private void BuildSearchCondition(MergeSpecification specification)
{
    if (_keyColumns.Count > 1)
    {
        BuildMultiKeySearchCondition(specification);
        return;
    }

    if (_keyColumns.Count == 0)
    {
        OutputWindowMessage.WriteMessage("The table: {0} does not contain a primary key so it isn't possible to work out which columns the merge should check.", _targetTableName);
        CreateSearchConditionForTableWithNoKeys((specification.SearchCondition = new BooleanComparisonExpression()) as BooleanComparisonExpression);
        return;
    }

    CreateSearchCondition(_keyColumns[0], (specification.SearchCondition = new BooleanComparisonExpression()) as BooleanComparisonExpression);
}
private void BuildSearchCondition(MergeSpecification specification)
{
    if (_merge.Table.Columns.Count(p => p.IsKey) > 1)
    {
        BuildMultiKeySearchCondition(specification);
        return;
    }

    if (_merge.Table.Columns.Count(p => p.IsKey) == 0)
    {
        //OutputWindowMessage.WriteMessage("The table: {0} does not contain a primary key so it isn't possible to work out what the columns the merge should check.",
        //    _targetTableName);
        CreateSearchConditionForTableWithNoKeys((specification.SearchCondition = new BooleanComparisonExpression()) as BooleanComparisonExpression);
        return;
    }

    CreateSearchCondition(_merge.Table.Columns.FirstOrDefault(p => p.IsKey).Name.GetName(), (specification.SearchCondition = new BooleanComparisonExpression()) as BooleanComparisonExpression);
}
public static IEnumerable<SchemaObjectReference> GetSchemaObjectReferences(
    this MergeSpecification mergeSpecification,
    ILogger logger,
    SchemaFile file
)
{
    var targetReference = mergeSpecification
        .Target
        .GetSchemaObjectReferences(logger, file)
        .First();

    targetReference.Alias = targetReference.Alias ?? mergeSpecification.TableAlias?.Value;

    var tableReferenceReferences = mergeSpecification
        .TableReference
        .GetSchemaObjectReferences(logger, file)
        .ToList();

    var outputIntoReferences = new List<SchemaObjectReference>()
    {
        new SchemaObjectReference()
        {
            Alias = "inserted",
            Identifier = targetReference.Identifier,
            Value = targetReference.Value
        },
        new SchemaObjectReference()
        {
            Alias = "deleted",
            Identifier = targetReference.Identifier,
            Value = targetReference.Value
        }
    };

    return new List<SchemaObjectReference>() { targetReference }
        .Concat(tableReferenceReferences)
        .Concat(outputIntoReferences)
        .ToList();
}
public override MergeSpecification FindForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount, IDictionary<SegmentCommitInfo, bool?> segmentsToMerge)
{
    IList<SegmentCommitInfo> eligibleSegments = new List<SegmentCommitInfo>();
    foreach (SegmentCommitInfo info in segmentInfos.Segments)
    {
        if (segmentsToMerge.ContainsKey(info))
        {
            eligibleSegments.Add(info);
        }
    }

    //System.out.println("MRMP: findMerges sis=" + segmentInfos + " eligible=" + eligibleSegments);
    MergeSpecification mergeSpec = null;
    if (eligibleSegments.Count > 1 || (eligibleSegments.Count == 1 && eligibleSegments[0].HasDeletions()))
    {
        mergeSpec = new MergeSpecification();
        // Already shuffled having come out of a set but
        // shuffle again for good measure:
        eligibleSegments = CollectionsHelper.Shuffle(eligibleSegments);
        int upto = 0;
        while (upto < eligibleSegments.Count)
        {
            int max = Math.Min(10, eligibleSegments.Count - upto);
            int inc = max <= 2 ? max : TestUtil.NextInt(Random, 2, max);
            mergeSpec.Add(new OneMerge(eligibleSegments.SubList(upto, upto + inc)));
            upto += inc;
        }
    }

    if (mergeSpec != null)
    {
        foreach (OneMerge merge in mergeSpec.Merges)
        {
            foreach (SegmentCommitInfo info in merge.Segments)
            {
                Debug.Assert(segmentsToMerge.ContainsKey(info));
            }
        }
    }

    return mergeSpec;
}
/// <summary>
/// Finds merges necessary to force-merge all deletes from the
/// index. We simply merge adjacent segments that have
/// deletes, up to mergeFactor at a time.
/// </summary>
public override MergeSpecification FindForcedDeletesMerges(SegmentInfos segmentInfos)
{
    var segments = segmentInfos.AsList();
    int numSegments = segments.Count;

    if (IsVerbose)
    {
        Message("findForcedDeleteMerges: " + numSegments + " segments");
    }

    var spec = new MergeSpecification();
    int firstSegmentWithDeletions = -1;
    IndexWriter w = m_writer.Get();
    Debug.Assert(w != null);

    for (int i = 0; i < numSegments; i++)
    {
        SegmentCommitInfo info = segmentInfos.Info(i);
        int delCount = w.NumDeletedDocs(info);
        if (delCount > 0)
        {
            if (IsVerbose)
            {
                Message(" segment " + info.Info.Name + " has deletions");
            }
            if (firstSegmentWithDeletions == -1)
            {
                firstSegmentWithDeletions = i;
            }
            else if (i - firstSegmentWithDeletions == m_mergeFactor)
            {
                // We've seen mergeFactor segments in a row with
                // deletions, so force a merge now:
                if (IsVerbose)
                {
                    Message(" add merge " + firstSegmentWithDeletions + " to " + (i - 1) + " inclusive");
                }
                spec.Add(new OneMerge(segments.SubList(firstSegmentWithDeletions, i)));
                firstSegmentWithDeletions = i;
            }
        }
        else if (firstSegmentWithDeletions != -1)
        {
            // End of a sequence of segments with deletions, so,
            // merge those past segments even if it's fewer than
            // mergeFactor segments
            if (IsVerbose)
            {
                Message(" add merge " + firstSegmentWithDeletions + " to " + (i - 1) + " inclusive");
            }
            spec.Add(new OneMerge(segments.SubList(firstSegmentWithDeletions, i)));
            firstSegmentWithDeletions = -1;
        }
    }

    if (firstSegmentWithDeletions != -1)
    {
        if (IsVerbose)
        {
            Message(" add merge " + firstSegmentWithDeletions + " to " + (numSegments - 1) + " inclusive");
        }
        spec.Add(new OneMerge(segments.SubList(firstSegmentWithDeletions, numSegments)));
    }

    return spec;
}
private void BuildActions(MergeSpecification specification)
{
    BuildInsertAction(specification);
    BuildUpdateAction(specification);
    BuildDeleteAction(specification);
}
private void SetTargetName(MergeSpecification specification)
{
    var table = (specification.Target = new NamedTableReference()) as NamedTableReference;
    table.SchemaObject = new SchemaObjectName().Create(_schemaName, _targetTableName) as SchemaObjectName;
}
private void SetTableAlias(MergeSpecification specification)
{
    specification.TableAlias = new Identifier { Value = MergeIdentifierStrings.TargetName };
}
private void BuildInsertAction(MergeSpecification specification)
{
    var action = new InsertMergeAction();
    var insertSource = action.Source = new ValuesInsertSource();
    var row = new RowValue();

    foreach (var column in _merge.Table.Columns)
    {
        var colRef = new ColumnReferenceExpression();
        colRef.ColumnType = ColumnType.Regular;
        colRef.MultiPartIdentifier = MultiPartIdentifierBuilder.Get(column.Name.GetName());
        action.Columns.Add(colRef);

        colRef = new ColumnReferenceExpression();
        colRef.ColumnType = ColumnType.Regular;
        colRef.MultiPartIdentifier = MultiPartIdentifierBuilder.Get(MergeIdentifierStrings.SourceName, column.Name.GetName());
        row.ColumnValues.Add(colRef);
    }

    insertSource.RowValues.Add(row);
    specification.ActionClauses.Add(new MergeActionClause { Action = action, Condition = MergeCondition.NotMatchedByTarget });
}
private void SetTargetName(MergeSpecification specification)
{
    var table = (specification.Target = new NamedTableReference()) as NamedTableReference;
    table.SchemaObject = _merge.Table.Name.ToSchemaObjectName();
}
private void SetInlineTableReference(MergeSpecification specification)
{
    var table = (specification.TableReference = new InlineDerivedTable()) as InlineDerivedTable;
    table.Alias = new Identifier { Value = MergeIdentifierStrings.SourceName };

    foreach (var col in _merge.Table.Columns)
    {
        table.Columns.Add(col.Name.ToIdentifier());
    }
}
public void SetInlineTableData(MergeSpecification specification)
{
    var table = specification.TableReference as InlineDerivedTable;
    if (null == table)
    {
        throw new NotImplementedException("only support merges from inline table reference");
    }

    table.RowValues.Clear();

    foreach (DataRow row in _merge.Data.Rows)
    {
        if (row.RowState == DataRowState.Deleted)
        {
            continue;
        }

        if (row.HasErrors)
        {
            continue;
        }

        var rowValue = new RowValue();

        foreach (var col in _merge.Table.Columns)
        {
            var value = row[col.Name.GetName()];
            if (value == DBNull.Value)
            {
                rowValue.ColumnValues.Add(new NullLiteral());
            }
            else
            {
                rowValue.ColumnValues.Add(GetColumnValue(value.ToString(), col.DataType, col.IsNText));
            }
        }

        table.RowValues.Add(rowValue);
    }
}
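// Taken together, SetTargetName, SetTableAlias, SetInlineTableReference,
// SetInlineTableData and the Build*Action methods assemble a statement of
// roughly the following shape. Illustrative only -- the table and column
// names are hypothetical, and the exact text depends on the script generator:
//
//   MERGE [dbo].[MyTable] AS Target
//   USING (VALUES (1, N'a'), (2, N'b')) AS Source ([Id], [Name])
//      ON Target.[Id] = Source.[Id]
//   WHEN MATCHED THEN
//       UPDATE SET [Name] = Source.[Name]
//   WHEN NOT MATCHED BY TARGET THEN
//       INSERT ([Id], [Name]) VALUES (Source.[Id], Source.[Name])
//   WHEN NOT MATCHED BY SOURCE THEN
//       DELETE;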
public override void Visit(MergeSpecification node)
{
    this.action(node);
}
/// <summary>
/// Returns the merges necessary to optimize the index.
/// This merge policy defines "optimized" to mean only one
/// segment in the index, where that segment has no
/// deletions pending nor separate norms, and it is in
/// compound file format if the current useCompoundFile
/// setting is true. This method returns multiple merges
/// (mergeFactor at a time) so the <see cref="MergeScheduler"/>
/// in use may make use of concurrency.
/// </summary>
public override MergeSpecification FindMergesForOptimize(SegmentInfos infos, int maxNumSegments, System.Collections.Hashtable segmentsToOptimize)
{
    MergeSpecification spec;
    System.Diagnostics.Debug.Assert(maxNumSegments > 0);

    if (!IsOptimized(infos, maxNumSegments, segmentsToOptimize))
    {
        // Find the newest (rightmost) segment that needs to
        // be optimized (other segments may have been flushed
        // since optimize started):
        int last = infos.Count;
        while (last > 0)
        {
            SegmentInfo info = infos.Info(--last);
            if (segmentsToOptimize.Contains(info))
            {
                last++;
                break;
            }
        }

        if (last > 0)
        {
            spec = new MergeSpecification();

            // First, enroll all "full" merges (size
            // mergeFactor) to potentially be run concurrently:
            while (last - maxNumSegments + 1 >= mergeFactor)
            {
                spec.Add(MakeOneMerge(infos, infos.Range(last - mergeFactor, last)));
                last -= mergeFactor;
            }

            // Only if there are no full merges pending do we
            // add a final partial (< mergeFactor segments) merge:
            if (0 == spec.merges.Count)
            {
                if (maxNumSegments == 1)
                {
                    // Since we must optimize down to 1 segment, the
                    // choice is simple:
                    if (last > 1 || !IsOptimized(infos.Info(0)))
                        spec.Add(MakeOneMerge(infos, infos.Range(0, last)));
                }
                else if (last > maxNumSegments)
                {
                    // Take care to pick a partial merge that is
                    // least cost, but does not make the index too
                    // lopsided. If we always just picked the
                    // partial tail then we could produce a highly
                    // lopsided index over time:

                    // We must merge this many segments to leave
                    // maxNumSegments in the index (from when
                    // optimize was first kicked off):
                    int finalMergeSize = last - maxNumSegments + 1;

                    // Consider all possible starting points:
                    long bestSize = 0;
                    int bestStart = 0;

                    for (int i = 0; i < last - finalMergeSize + 1; i++)
                    {
                        long sumSize = 0;
                        for (int j = 0; j < finalMergeSize; j++)
                            sumSize += Size(infos.Info(j + i));
                        if (i == 0 || (sumSize < 2 * Size(infos.Info(i - 1)) && sumSize < bestSize))
                        {
                            bestStart = i;
                            bestSize = sumSize;
                        }
                    }

                    spec.Add(MakeOneMerge(infos, infos.Range(bestStart, bestStart + finalMergeSize)));
                }
            }
        }
        else
            spec = null;
    }
    else
        spec = null;

    return spec;
}
/// <summary>
/// Finds merges necessary to expunge all deletes from the
/// index. We simply merge adjacent segments that have
/// deletes, up to mergeFactor at a time.
/// </summary>
public override MergeSpecification FindMergesToExpungeDeletes(SegmentInfos segmentInfos)
{
    int numSegments = segmentInfos.Count;

    if (Verbose())
        Message("findMergesToExpungeDeletes: " + numSegments + " segments");

    MergeSpecification spec = new MergeSpecification();
    int firstSegmentWithDeletions = -1;

    for (int i = 0; i < numSegments; i++)
    {
        SegmentInfo info = segmentInfos.Info(i);
        int delCount = writer.NumDeletedDocs(info);
        if (delCount > 0)
        {
            if (Verbose())
                Message(" segment " + info.name + " has deletions");
            if (firstSegmentWithDeletions == -1)
                firstSegmentWithDeletions = i;
            else if (i - firstSegmentWithDeletions == mergeFactor)
            {
                // We've seen mergeFactor segments in a row with
                // deletions, so force a merge now:
                if (Verbose())
                    Message(" add merge " + firstSegmentWithDeletions + " to " + (i - 1) + " inclusive");
                spec.Add(MakeOneMerge(segmentInfos, segmentInfos.Range(firstSegmentWithDeletions, i)));
                firstSegmentWithDeletions = i;
            }
        }
        else if (firstSegmentWithDeletions != -1)
        {
            // End of a sequence of segments with deletions, so,
            // merge those past segments even if it's fewer than
            // mergeFactor segments
            if (Verbose())
                Message(" add merge " + firstSegmentWithDeletions + " to " + (i - 1) + " inclusive");
            spec.Add(MakeOneMerge(segmentInfos, segmentInfos.Range(firstSegmentWithDeletions, i)));
            firstSegmentWithDeletions = -1;
        }
    }

    if (firstSegmentWithDeletions != -1)
    {
        if (Verbose())
            Message(" add merge " + firstSegmentWithDeletions + " to " + (numSegments - 1) + " inclusive");
        spec.Add(MakeOneMerge(segmentInfos, segmentInfos.Range(firstSegmentWithDeletions, numSegments)));
    }

    return spec;
}
/// <summary>
/// Checks if any merges are now necessary and returns a
/// <see cref="MergePolicy.MergeSpecification"/> if so. A merge
/// is necessary when there are more than
/// <see cref="SetMergeFactor"/> segments at a given level. When
/// multiple levels have too many segments, this method
/// will return multiple merges, allowing the
/// <see cref="MergeScheduler"/> to use concurrency.
/// </summary>
public override MergeSpecification FindMerges(SegmentInfos infos)
{
    int numSegments = infos.Count;
    if (Verbose())
        Message("findMerges: " + numSegments + " segments");

    // Compute levels, which is just log (base mergeFactor)
    // of the size of each segment
    float[] levels = new float[numSegments];
    float norm = (float)System.Math.Log(mergeFactor);

    for (int i = 0; i < numSegments; i++)
    {
        SegmentInfo info = infos.Info(i);
        long size = Size(info);

        // Floor tiny segments
        if (size < 1)
            size = 1;
        levels[i] = (float)System.Math.Log(size) / norm;
    }

    float levelFloor;
    if (minMergeSize <= 0)
        levelFloor = (float)0.0;
    else
        levelFloor = (float)(System.Math.Log(minMergeSize) / norm);

    // Now, we quantize the log values into levels.  The
    // first level is any segment whose log size is within
    // LEVEL_LOG_SPAN of the max size, or one that has such a
    // segment "to the right".  Then, we find the max of all
    // other segments and use that to define the next level
    // segment, etc.

    MergeSpecification spec = null;

    int start = 0;
    while (start < numSegments)
    {
        // Find max level of all segments not already
        // quantized.
        float maxLevel = levels[start];
        for (int i = 1 + start; i < numSegments; i++)
        {
            float level = levels[i];
            if (level > maxLevel)
                maxLevel = level;
        }

        // Now search backwards for the rightmost segment that
        // falls into this level:
        float levelBottom;
        if (maxLevel < levelFloor)
            // All remaining segments fall into the min level
            levelBottom = -1.0F;
        else
        {
            levelBottom = (float)(maxLevel - LEVEL_LOG_SPAN);

            // Force a boundary at the level floor
            if (levelBottom < levelFloor && maxLevel >= levelFloor)
                levelBottom = levelFloor;
        }

        int upto = numSegments - 1;
        while (upto >= start)
        {
            if (levels[upto] >= levelBottom)
            {
                break;
            }
            upto--;
        }
        if (Verbose())
            Message(" level " + levelBottom + " to " + maxLevel + ": " + (1 + upto - start) + " segments");

        // Finally, record all merges that are viable at this level:
        int end = start + mergeFactor;
        while (end <= 1 + upto)
        {
            bool anyTooLarge = false;
            for (int i = start; i < end; i++)
            {
                SegmentInfo info = infos.Info(i);
                anyTooLarge |= (Size(info) >= maxMergeSize || SizeDocs(info) >= maxMergeDocs);
            }

            if (!anyTooLarge)
            {
                if (spec == null)
                    spec = new MergeSpecification();
                if (Verbose())
                    Message(" " + start + " to " + end + ": add this merge");
                spec.Add(MakeOneMerge(infos, infos.Range(start, end)));
            }
            else if (Verbose())
                Message(" " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping");

            start = end;
            end = start + mergeFactor;
        }

        start = 1 + upto;
    }

    return spec;
}
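// For intuition about the level computation in FindMerges above: a segment's
// level is the logarithm of its size in base mergeFactor. With
// mergeFactor = 10, sizes 1,000 and 5,000 give levels 3.0 and ~3.7, which fall
// inside a single LEVEL_LOG_SPAN (0.75) band, while a size of 1,000,000 sits
// three levels higher. Standalone illustration (hypothetical helper, not part
// of the policy itself):
static double Level(long size, int mergeFactor) =>
    Math.Log(Math.Max(size, 1)) / Math.Log(mergeFactor);

// Level(1000, 10)    -> 3.00
// Level(5000, 10)    -> ~3.70 (same band as 3.00: difference < 0.75)
// Level(1000000, 10) -> 6.00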
public override MergeSpecification FindForcedMerges(SegmentInfos infos, int maxSegmentCount, IDictionary<SegmentCommitInfo, bool?> segmentsToMerge)
{
    if (Verbose())
    {
        Message("findForcedMerges maxSegmentCount=" + maxSegmentCount + " infos=" + Writer.Get().SegString(infos.Segments) + " segmentsToMerge=" + segmentsToMerge);
    }

    List<SegmentCommitInfo> eligible = new List<SegmentCommitInfo>();
    bool forceMergeRunning = false;
    ICollection<SegmentCommitInfo> merging = Writer.Get().MergingSegments;
    bool? segmentIsOriginal = false;

    foreach (SegmentCommitInfo info in infos.Segments)
    {
        // Use TryGetValue: the indexer would throw for segments that were
        // flushed after forceMerge started and are not in segmentsToMerge.
        if (segmentsToMerge.TryGetValue(info, out bool? isOriginal) && isOriginal != null)
        {
            segmentIsOriginal = isOriginal;
            if (!merging.Contains(info))
            {
                eligible.Add(info);
            }
            else
            {
                forceMergeRunning = true;
            }
        }
    }

    if (eligible.Count == 0)
    {
        return null;
    }

    if ((maxSegmentCount > 1 && eligible.Count <= maxSegmentCount) || (maxSegmentCount == 1 && eligible.Count == 1 && (segmentIsOriginal == false || IsMerged(infos, eligible[0]))))
    {
        if (Verbose())
        {
            Message("already merged");
        }
        return null;
    }

    eligible.Sort(new SegmentByteSizeDescending(this));

    if (Verbose())
    {
        Message("eligible=" + eligible);
        Message("forceMergeRunning=" + forceMergeRunning);
    }

    int end = eligible.Count;
    MergeSpecification spec = null;

    // Do full merges, first, backwards:
    while (end >= MaxMergeAtOnceExplicit_Renamed + maxSegmentCount - 1)
    {
        if (spec == null)
        {
            spec = new MergeSpecification();
        }
        OneMerge merge = new OneMerge(eligible.SubList(end - MaxMergeAtOnceExplicit_Renamed, end));
        if (Verbose())
        {
            Message("add merge=" + Writer.Get().SegString(merge.Segments));
        }
        spec.Add(merge);
        end -= MaxMergeAtOnceExplicit_Renamed;
    }

    if (spec == null && !forceMergeRunning)
    {
        // Do final merge
        int numToMerge = end - maxSegmentCount + 1;
        OneMerge merge = new OneMerge(eligible.SubList(end - numToMerge, end));
        if (Verbose())
        {
            Message("add final merge=" + merge.SegString(Writer.Get().Directory));
        }
        spec = new MergeSpecification();
        spec.Add(merge);
    }

    return spec;
}
/// <summary>
/// Returns the merges necessary to <see cref="IndexWriter.ForceMerge(int)"/> the index. This method constrains
/// the returned merges only by the <paramref name="maxNumSegments"/> parameter, and
/// guarantees that exactly that number of segments will remain in the index.
/// </summary>
private MergeSpecification FindForcedMergesMaxNumSegments(SegmentInfos infos, int maxNumSegments, int last)
{
    var spec = new MergeSpecification();
    var segments = infos.AsList();

    // First, enroll all "full" merges (size
    // mergeFactor) to potentially be run concurrently:
    while (last - maxNumSegments + 1 >= m_mergeFactor)
    {
        spec.Add(new OneMerge(segments.SubList(last - m_mergeFactor, last)));
        last -= m_mergeFactor;
    }

    // Only if there are no full merges pending do we
    // add a final partial (< mergeFactor segments) merge:
    if (0 == spec.Merges.Count)
    {
        if (maxNumSegments == 1)
        {
            // Since we must merge down to 1 segment, the
            // choice is simple:
            if (last > 1 || !IsMerged(infos, infos.Info(0)))
            {
                spec.Add(new OneMerge(segments.SubList(0, last)));
            }
        }
        else if (last > maxNumSegments)
        {
            // Take care to pick a partial merge that is
            // least cost, but does not make the index too
            // lopsided. If we always just picked the
            // partial tail then we could produce a highly
            // lopsided index over time:

            // We must merge this many segments to leave
            // maxNumSegments in the index (from when
            // forceMerge was first kicked off):
            int finalMergeSize = last - maxNumSegments + 1;

            // Consider all possible starting points:
            long bestSize = 0;
            int bestStart = 0;

            for (int i = 0; i < last - finalMergeSize + 1; i++)
            {
                long sumSize = 0;
                for (int j = 0; j < finalMergeSize; j++)
                {
                    sumSize += Size(infos.Info(j + i));
                }
                if (i == 0 || (sumSize < 2 * Size(infos.Info(i - 1)) && sumSize < bestSize))
                {
                    bestStart = i;
                    bestSize = sumSize;
                }
            }

            spec.Add(new OneMerge(segments.SubList(bestStart, bestStart + finalMergeSize)));
        }
    }

    return spec.Merges.Count == 0 ? null : spec;
}
/// <summary>
/// Checks if any merges are now necessary and returns a
/// <see cref="MergePolicy.MergeSpecification"/> if so. A merge
/// is necessary when there are more than
/// <see cref="MergeFactor"/> segments at a given level. When
/// multiple levels have too many segments, this method
/// will return multiple merges, allowing the
/// <see cref="MergeScheduler"/> to use concurrency.
/// </summary>
public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos infos)
{
    int numSegments = infos.Count;
    if (IsVerbose)
    {
        Message("findMerges: " + numSegments + " segments");
    }

    // Compute levels, which is just log (base mergeFactor)
    // of the size of each segment
    IList<SegmentInfoAndLevel> levels = new List<SegmentInfoAndLevel>();
    var norm = (float)Math.Log(m_mergeFactor);

    ICollection<SegmentCommitInfo> mergingSegments = m_writer.Get().MergingSegments;

    for (int i = 0; i < numSegments; i++)
    {
        SegmentCommitInfo info = infos.Info(i);
        long size = Size(info);

        // Floor tiny segments
        if (size < 1)
        {
            size = 1;
        }

        SegmentInfoAndLevel infoLevel = new SegmentInfoAndLevel(info, (float)Math.Log(size) / norm, i);
        levels.Add(infoLevel);

        if (IsVerbose)
        {
            long segBytes = SizeBytes(info);
            string extra = mergingSegments.Contains(info) ? " [merging]" : "";
            if (size >= m_maxMergeSize)
            {
                extra += " [skip: too large]";
            }
            Message("seg=" + m_writer.Get().SegString(info) + " level=" + infoLevel.level + " size=" + String.Format(CultureInfo.InvariantCulture, "{0:0.00} MB", segBytes / 1024 / 1024.0) + extra);
        }
    }

    float levelFloor;
    if (m_minMergeSize <= 0)
    {
        levelFloor = (float)0.0;
    }
    else
    {
        levelFloor = (float)(Math.Log(m_minMergeSize) / norm);
    }

    // Now, we quantize the log values into levels.  The
    // first level is any segment whose log size is within
    // LEVEL_LOG_SPAN of the max size, or one that has such a
    // segment "to the right".  Then, we find the max of all
    // other segments and use that to define the next level
    // segment, etc.

    MergeSpecification spec = null;

    int numMergeableSegments = levels.Count;

    int start = 0;
    while (start < numMergeableSegments)
    {
        // Find max level of all segments not already
        // quantized.
        float maxLevel = levels[start].level;
        for (int i = 1 + start; i < numMergeableSegments; i++)
        {
            float level = levels[i].level;
            if (level > maxLevel)
            {
                maxLevel = level;
            }
        }

        // Now search backwards for the rightmost segment that
        // falls into this level:
        float levelBottom;
        if (maxLevel <= levelFloor)
        {
            // All remaining segments fall into the min level
            levelBottom = -1.0F;
        }
        else
        {
            levelBottom = (float)(maxLevel - LEVEL_LOG_SPAN);

            // Force a boundary at the level floor
            if (levelBottom < levelFloor && maxLevel >= levelFloor)
            {
                levelBottom = levelFloor;
            }
        }

        int upto = numMergeableSegments - 1;
        while (upto >= start)
        {
            if (levels[upto].level >= levelBottom)
            {
                break;
            }
            upto--;
        }
        if (IsVerbose)
        {
            Message(" level " + levelBottom.ToString("0.0") + " to " + maxLevel.ToString("0.0") + ": " + (1 + upto - start) + " segments");
        }

        // Finally, record all merges that are viable at this level:
        int end = start + m_mergeFactor;
        while (end <= 1 + upto)
        {
            bool anyTooLarge = false;
            bool anyMerging = false;
            for (int i = start; i < end; i++)
            {
                SegmentCommitInfo info = levels[i].info;
                anyTooLarge |= (Size(info) >= m_maxMergeSize || SizeDocs(info) >= m_maxMergeDocs);
                if (mergingSegments.Contains(info))
                {
                    anyMerging = true;
                    break;
                }
            }

            if (anyMerging)
            {
                // skip
            }
            else if (!anyTooLarge)
            {
                if (spec == null)
                {
                    spec = new MergeSpecification();
                }
                IList<SegmentCommitInfo> mergeInfos = new List<SegmentCommitInfo>();
                for (int i = start; i < end; i++)
                {
                    mergeInfos.Add(levels[i].info);
                    Debug.Assert(infos.Contains(levels[i].info));
                }
                if (IsVerbose)
                {
                    Message(" add merge=" + m_writer.Get().SegString(mergeInfos) + " start=" + start + " end=" + end);
                }
                spec.Add(new OneMerge(mergeInfos));
            }
            else if (IsVerbose)
            {
                Message(" " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping");
            }

            start = end;
            end = start + m_mergeFactor;
        }

        start = 1 + upto;
    }

    return spec;
}
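// In Lucene.NET 4.8, application code never calls FindMerges directly: the
// merge policy is handed to the writer via IndexWriterConfig and invoked by
// the writer itself as segments are flushed. A hedged usage sketch; the index
// path and analyzer choice are placeholders:
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Lucene.Net.Util;

var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, new StandardAnalyzer(LuceneVersion.LUCENE_48))
{
    // Install a log merge policy like the one shown above; the writer calls
    // its FindMerges whenever the set of segments changes.
    MergePolicy = new LogByteSizeMergePolicy { MergeFactor = 10 }
};
using var writer = new IndexWriter(FSDirectory.Open("index"), config);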
public override MergeSpecification FindForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount, IDictionary<SegmentCommitInfo, bool?> segmentsToMerge)
{
    // first find all old segments
    IDictionary<SegmentCommitInfo, bool?> oldSegments = new Dictionary<SegmentCommitInfo, bool?>();
    foreach (SegmentCommitInfo si in segmentInfos.Segments)
    {
        // Use TryGetValue: the indexer would throw for segments that are
        // not present in segmentsToMerge.
        if (segmentsToMerge.TryGetValue(si, out bool? v) && v != null && ShouldUpgradeSegment(si))
        {
            oldSegments[si] = v;
        }
    }

    if (Verbose())
    {
        Message("findForcedMerges: segmentsToUpgrade=" + oldSegments);
    }

    if (oldSegments.Count == 0)
    {
        return null;
    }

    MergeSpecification spec = @base.FindForcedMerges(segmentInfos, maxSegmentCount, oldSegments);

    if (spec != null)
    {
        // remove all segments that are in merge specification from oldSegments,
        // the resulting set contains all segments that are left over
        // and will be merged to one additional segment:
        foreach (OneMerge om in spec.Merges)
        {
            foreach (SegmentCommitInfo sipc in om.Segments)
            {
                oldSegments.Remove(sipc);
            }
        }
    }

    if (oldSegments.Count > 0)
    {
        if (Verbose())
        {
            Message("findForcedMerges: " + @base.GetType().Name + " does not want to merge all old segments, merge remaining ones into new segment: " + oldSegments);
        }

        IList<SegmentCommitInfo> newInfos = new List<SegmentCommitInfo>();
        foreach (SegmentCommitInfo si in segmentInfos.Segments)
        {
            if (oldSegments.ContainsKey(si))
            {
                newInfos.Add(si);
            }
        }

        // add the final merge
        if (spec == null)
        {
            spec = new MergeSpecification();
        }
        spec.Add(new OneMerge(newInfos));
    }

    return spec;
}
public override void ExplicitVisit(MergeSpecification fragment)
{
    _fragments.Add(fragment);
}
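// The visitor overrides above only fire once a parsed syntax tree accepts the
// visitor. A minimal driver sketch, assuming ScriptDom's TSql120Parser; the
// SQL text and the collector class itself are illustrative:
using System;
using System.Collections.Generic;
using System.IO;
using Microsoft.SqlServer.TransactSql.ScriptDom;

// Hypothetical collector in the style of the overrides above.
class MergeCollector : TSqlFragmentVisitor
{
    public List<MergeSpecification> Fragments { get; } = new List<MergeSpecification>();

    public override void ExplicitVisit(MergeSpecification fragment) => Fragments.Add(fragment);
}

class Program
{
    static void Main()
    {
        var parser = new TSql120Parser(initialQuotedIdentifiers: true);
        TSqlFragment fragment;
        using (var reader = new StringReader(
            "MERGE dbo.Target AS t USING dbo.Source AS s ON t.Id = s.Id " +
            "WHEN MATCHED THEN UPDATE SET t.Name = s.Name;"))
        {
            fragment = parser.Parse(reader, out IList<ParseError> errors);
        }

        // Accept walks the tree and calls ExplicitVisit(MergeSpecification)
        // for every MERGE body it finds.
        var collector = new MergeCollector();
        fragment.Accept(collector);
        Console.WriteLine(collector.Fragments.Count); // prints 1
    }
}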