/// <summary>
/// Folds the lambdas of <paramref name="keyBookmark"/> into the decomposer
/// state, with the region type supplied explicitly by the caller.
/// </summary>
/// <param name="keyBookmark">Bookmark whose lambda list is processed.</param>
/// <param name="regionType">Region type governing how each lambda is handled.</param>
public void Update(Di4.B keyBookmark, RegionType regionType)
{
    // Candidate regions require no bookkeeping at this stage.
    if (regionType == RegionType.Candidate)
        return;

    if (regionType == RegionType.Designated)
    {
        // Lambdas dictionary of the region currently being built.
        var currentLambdas = _designatedRegions[_designatedRegions.Count - 1].lambdas;
        foreach (var lambda in keyBookmark.lambda)
        {
            if (lambda.phi == Phi.LeftEnd)
                currentLambdas.Add(lambda.atI, Phi.LeftEnd);
            else
                UpdateDesignatedRegions(lambda.atI);
        }
    }
    else if (regionType == RegionType.Decomposition)
    {
        foreach (var lambda in keyBookmark.lambda)
        {
            if (lambda.phi == Phi.LeftEnd)
            {
                // Park the left end until a matching right end (or the next
                // region) consumes it.
                _tLambdas.Add(lambda.atI, Phi.LeftEnd);
            }
            else if (!_tLambdas.Remove(lambda.atI))
            {
                // Right end with no pending left end: the interval started
                // in an earlier region.
                UpdateDesignatedRegions(lambda.atI);
            }
        }
    }
}
/// <summary>
/// Closes the current designated region at <paramref name="rightEnd"/>,
/// registers the bookmark's right-end lambdas as decomposed intervals, and
/// switches the state to the decomposition phase.
/// </summary>
/// <param name="rightEnd">Right boundary assigned to the last designated region.</param>
/// <param name="keyBookmark">Bookmark whose lambda list is processed.</param>
public void Close(C rightEnd, Di4.B keyBookmark)
{
    // Replace the last region with a copy whose right boundary is fixed
    // (UpdateRight returns the updated value rather than mutating in place).
    _designatedRegions[_designatedRegions.Count - 1] =
        _designatedRegions[_designatedRegions.Count - 1].UpdateRight(rightEnd);

    foreach (var lambda in keyBookmark.lambda)
    {
        if (lambda.phi == Phi.LeftEnd)
        {
            // Left ends opening here are held until the next region opens.
            _tLambdas.Add(lambda.atI);
        }
        else
        {
            // Right end of an interval spanning up to the last region.
            // AddOrUpdate returning true indicates one fewer interval is
            // still unresolved, hence the _mu decrement.
            if (_decomposedIntervals.AddOrUpdate(lambda.atI,
                new DecomposerValue<C>(
                    leftDesignatedRegion: 0,
                    rightDesignatedRegion: _designatedRegions.Count - 1),
                RightEndUpdateFunction))
            {
                _mu--;
            }
        }
    }

    _regionType = RegionType.Decomposition;

    // All tracked intervals resolved: finalize the accumulated regions.
    if (_mu == 0)
    {
        Conclude();
    }
}
/// <summary>
/// Opens a new designated region at <paramref name="leftEnd"/>, registers the
/// bookmark's lambdas and any left ends pending since the previous close, and
/// refreshes the unresolved-interval count when requested.
/// </summary>
/// <param name="leftEnd">Left boundary of the new designated region.</param>
/// <param name="keyBookmark">Bookmark whose lambda list is processed.</param>
public void Open(C leftEnd, Di4.B keyBookmark)
{
    //_tDecomposedIntervalsCount = _decomposedIntervals.Count;
    _regionType = RegionType.Designated;

    // New region starts degenerate (left == right); the right boundary is
    // fixed later by Close.
    _designatedRegions.Add(new DesignatedRegionNEW<C>(left: leftEnd, right: leftEnd));

    foreach (var lambda in keyBookmark.lambda)
    {
        if (lambda.phi == Phi.LeftEnd)
        {
            /// Note:
            /// If there exist any two (or more) intervals with
            /// the same hash key, it is possible that this call throws
            /// an exception. However, the modification I made on the
            /// atomic dictionary would not throw an exception, but
            /// rather replaces the old value with the given value.
            /// Logically, this is wrong! but garbage in, garbage
            /// out. If this behavior is not intended, the hash
            /// keys must be the true unique identifier.
            _decomposedIntervals.Add(
                lambda.atI,
                new DecomposerValue<C>(
                    leftDesignatedRegion: _designatedRegions.Count - 1,
                    rightDesignatedRegion: _designatedRegions.Count - 1));
        }
        else
        {
            // Right end with no pending left end: the interval spans from
            // before the first region up to the previous one (Count - 2).
            // The Count > 1 guard skips right ends seen before any earlier
            // region exists.
            if (!_tLambdas.Remove(lambda.atI) && _designatedRegions.Count > 1)
            {
                if (_decomposedIntervals.AddOrUpdate(lambda.atI,
                    new DecomposerValue<C>(
                        leftDesignatedRegion: 0,
                        rightDesignatedRegion: _designatedRegions.Count - 2),
                    RightEndUpdateFunction))
                {
                    _mu--;
                }
            }
        }
    }

    // Left ends carried over from the previous Close start in this region.
    foreach (var atI in _tLambdas)
    {
        _decomposedIntervals.Add(atI, new DecomposerValue<C>(
            leftDesignatedRegion: _designatedRegions.Count - 1,
            rightDesignatedRegion: _designatedRegions.Count - 1));
    }

    if (_updateMu)
    {
        // Exclude intervals whose left end was already pending when this
        // region opened.
        _mu = keyBookmark.mu - _tLambdas.Count;// - _tDecomposedIntervalsCount;
        _updateMu = false;
    }

    _tLambdas.Clear();
}
/// <summary>
/// Seals the most recently opened designated region at
/// <paramref name="rightEnd"/> and folds the bookmark's lambdas into the
/// pending state.
/// </summary>
/// <param name="rightEnd">Right boundary assigned to the last designated region.</param>
/// <param name="keyBookmark">Bookmark whose lambda list is processed.</param>
public void Close(C rightEnd, Di4.B keyBookmark)
{
    // Fix the right boundary of the region currently being built.
    _designatedRegions[_designatedRegions.Count - 1].rightEnd = rightEnd;

    foreach (var lambda in keyBookmark.lambda)
    {
        if (lambda.phi == Phi.LeftEnd)
        {
            // Left ends opening here are parked until the next region opens.
            _tLambdas.Add(lambda.atI, Phi.LeftEnd);
            continue;
        }

        // A right end closes an interval that started in an earlier region.
        UpdateDesignatedRegions(lambda.atI);
    }
}
/// <summary>
/// Opens a new designated region at <paramref name="leftEnd"/>, seeding it
/// with left ends pending since the previous close and with intervals still
/// open in the previous region, then concludes the previous region.
/// </summary>
/// <param name="leftEnd">Left boundary of the new designated region.</param>
/// <param name="keyBookmark">Bookmark whose lambda list is processed.</param>
public void Open(C leftEnd, Di4.B keyBookmark)
{
    var newDesignatedRegion = new DesignatedRegion<C>();
    newDesignatedRegion.leftEnd = leftEnd;

    // mu excludes intervals whose left end was already pending before this
    // region opened (they are re-added below).
    newDesignatedRegion.mu = keyBookmark.mu - _tLambdas.Count;

    // Carry pending left ends (seen since the last region closed) into the
    // new region.
    foreach (var lambda in _tLambdas)
    {
        newDesignatedRegion.lambdas.Add(lambda.Key, Phi.LeftEnd);
    }
    _tLambdas.Clear();

    foreach (var lambda in keyBookmark.lambda)
    {
        if (lambda.phi == Phi.LeftEnd)
        {
            newDesignatedRegion.lambdas.Add(lambda.atI, Phi.LeftEnd);
        }
        else
        {
            // Right end of an interval that started in an earlier region.
            UpdateDesignatedRegions(lambda.atI);
        }
    }

    if (_designatedRegions.Count > 0)
    {
        // Intervals still open (left end only) in the previous region extend
        // into this one; each inherited interval also decrements this
        // region's mu.
        foreach (var lambda in _designatedRegions[_designatedRegions.Count - 1].lambdas)
        {
            if (lambda.Value == Phi.LeftEnd)
            {
                newDesignatedRegion.lambdas.Add(lambda.Key, Phi.LeftEnd);
                newDesignatedRegion.mu--;
            }
        }

        // Finalize the previous region before the new one is appended.
        Conclude();
    }

    _designatedRegions.Add(newDesignatedRegion);
}
/// <summary>
/// Applies the lambdas of <paramref name="keyBookmark"/> to the decomposer
/// state according to the current region type (<c>_regionType</c>).
/// </summary>
/// <param name="keyBookmark">Bookmark whose lambda list is processed.</param>
public void Update(Di4.B keyBookmark)
{
    switch (_regionType)
    {
        case RegionType.Candidate:
            // Before any designated region exists, only track the balance of
            // pending left ends.
            foreach (var lambda in keyBookmark.lambda)
            {
                if (lambda.phi == Phi.LeftEnd)
                {
                    _tLambdas.Add(lambda.atI);
                }
                else
                {
                    _tLambdas.Remove(lambda.atI);
                }
            }
            return;

        case RegionType.Designated:
            foreach (var lambda in keyBookmark.lambda)
            {
                if (lambda.phi == Phi.LeftEnd)
                {
                    /// Note:
                    /// If there exist any two (or more) intervals with
                    /// the same hash key, it is possible that this call throws
                    /// an exception. However, the modification I made on the
                    /// atomic dictionary would not throw an exception, but
                    /// rather replaces the old value with the given value.
                    /// Logically, this is wrong! but garbage in, garbage
                    /// out. If this behavior is not intended, the hash
                    /// keys must be the true unique identifier.
                    _decomposedIntervals.Add(lambda.atI,
                        new DecomposerValue<C>(
                            leftDesignatedRegion: _designatedRegions.Count - 1,
                            rightDesignatedRegion: _designatedRegions.Count - 1));
                }
                else
                {
                    // Interval both starts and ends within the current
                    // region; a true return from AddOrUpdate means one fewer
                    // unresolved interval.
                    if (_decomposedIntervals.AddOrUpdate(lambda.atI,
                        new DecomposerValue<C>(
                            leftDesignatedRegion: _designatedRegions.Count - 1,
                            rightDesignatedRegion: _designatedRegions.Count - 1),
                        RightEndUpdateFunction))
                    {
                        _mu--;
                    }
                }
            }
            break;

        case RegionType.Decomposition:
            foreach (var lambda in keyBookmark.lambda)
            {
                if (lambda.phi == Phi.LeftEnd)
                {
                    _tLambdas.Add(lambda.atI);
                }
                else if (!_tLambdas.Remove(lambda.atI))
                {
                    // Right end with no pending left end: the interval spans
                    // from before the first region up to the last one.
                    if (_decomposedIntervals.AddOrUpdate(lambda.atI,
                        new DecomposerValue<C>(
                            leftDesignatedRegion: 0,
                            rightDesignatedRegion: _designatedRegions.Count - 1),
                        RightEndUpdateFunction))
                    {
                        _mu--;
                    }
                }
            }
            break;
    }
}