/// <summary>
/// Attempts to get a non-full tail segment, allocating a new one if the
/// current tail is full and the segment count limit has not been reached
/// </summary>
/// <returns>Non-full tail segment if found (null otherwise, i.e. when the tail is full and <see cref="_maxSegmentCount"/> is reached)</returns>
private DiskQueueSegmentWrapper<T> TryGetNonFullTailSegment()
{
    // Fast path: optimistic lock-free read of the tail
    DiskQueueSegmentWrapper<T> result = _tailSegment;
    if (!result.IsFull)
    {
        return (result);
    }

    bool lockTaken = false;
    bool segmentAllocated = false;
    try
    {
        Monitor.Enter(_segmentOperationsLock, ref lockTaken);

        result = _tailSegment; // Should retry inside lock
        if (!result.IsFull)
        {
            return (result);
        }

        // Tail is full: allocate a fresh segment only while below the segment limit
        if (_segments.Count < _maxSegmentCount)
        {
            result = AllocateNewSegment();
            segmentAllocated = true;
            return (result);
        }
    }
    finally
    {
        // Exit the lock first so waiters woken by PulseAll do not immediately block on it
        if (lockTaken)
        {
            Monitor.Exit(_segmentOperationsLock);
        }
        if (segmentAllocated) // Segment was allocated => should notify all waiters outside the lock
        {
            _addMonitor.PulseAll();
        }
    }

    // Tail is full and the segment limit is reached
    return (null);
}
/// <summary>
/// Moves head to the next non-completed segment
/// </summary>
/// <returns>New head segment</returns>
private DiskQueueSegmentWrapper<T> MoveHeadToNonCompletedSegment()
{
    Debug.Assert(Monitor.IsEntered(_segmentOperationsLock));

    var segment = _headSegment;
    // Head cannot move when it is the last segment or is still not completed
    if (segment.NextSegment == null || !segment.IsCompleted)
    {
        return segment;
    }

    // Walk the linked list: stop at the first non-completed segment or at the tail
    do
    {
        segment = segment.NextSegment;
    }
    while (segment.NextSegment != null && segment.IsCompleted);

    _headSegment = segment;
    VerifyConsistency();
    return segment;
}
/// <summary>
/// Verifies consistency of the segment list, the head/tail references and the
/// segment linked-list (debug builds only; requires <see cref="_segmentOperationsLock"/> to be held)
/// </summary>
private void VerifyConsistency()
{
    Debug.Assert(Monitor.IsEntered(_segmentOperationsLock), "segment lock is not acquired");
    Debug.Assert(_headSegment != null, "_headSegment != null");
    Debug.Assert(_tailSegment != null, "_tailSegment != null");
    Debug.Assert(_segments.Count > 0, "_segments.Count > 0");
    Debug.Assert(_tailSegment == _segments[_segments.Count - 1], "_tailSegment == _segments[_segments.Count - 1]");

    bool beforeHead = true;
    DiskQueueSegmentWrapper<T> prevSegment = null;
    _segments.ForEach(curSegment =>
    {
        // Every segment must be linked to its successor in list order
        if (prevSegment != null)
        {
            Debug.Assert(prevSegment.NextSegment == curSegment, "Linked-list of segments is broken");
        }
        if (curSegment == _headSegment)
        {
            Debug.Assert(beforeHead, "Head segments met twice");
            beforeHead = false;
        }
        // FIX: typo "segements" -> "segments" in the two assertion messages below
        if (beforeHead)
        {
            Debug.Assert(curSegment.IsCompleted, "All segments before head should be in IsCompleted state");
        }
        if (curSegment != _tailSegment)
        {
            Debug.Assert(curSegment.IsFull, "All segments before tail should be in IsFull state");
        }
        prevSegment = curSegment;
    });

    Debug.Assert(!beforeHead, "HeadSegment is not found in the list of segments");
}
/// <summary>
/// Creates a new segment.
/// Attention: waiters notification is required after the segment allocation (<see cref="_addMonitor"/>)
/// </summary>
/// <returns>Created segment</returns>
private DiskQueueSegmentWrapper<T> AllocateNewSegment()
{
    CheckDisposed();
    Debug.Assert(Monitor.IsEntered(_segmentOperationsLock));
    Debug.Assert(!Monitor.IsEntered(_takeMonitor));
    Debug.Assert(!Monitor.IsEntered(_peekMonitor));

    // Segment numbers grow monotonically; overflow is trapped explicitly via 'checked'
    var newSegment = _segmentFactory.CreateSegmentWrapped(_segmentsPath, checked(++_lastSegmentNumber));
    if (newSegment == null)
    {
        throw new InvalidOperationException("CreateSegment returned null");
    }
    Debug.Assert(newSegment.Number == _lastSegmentNumber);

    // Append to the list and link after the current tail, then publish as the new tail
    _segments.Add(newSegment);
    _tailSegment.NextSegment = newSegment;
    _tailSegment = newSegment;

    VerifyConsistency();
    return newSegment;
}
/// <summary>
/// Attempts to find a non-completed head segment, advancing the head past
/// completed segments and compacting them when required
/// </summary>
/// <returns>Non-completed head segment if found (null otherwise)</returns>
private DiskQueueSegmentWrapper<T> TryGetNonCompletedHeadSegment()
{
    // Fast path: optimistic lock-free read of the head
    DiskQueueSegmentWrapper<T> result = _headSegment;
    if (!result.IsCompleted)
    {
        return (result);
    }
    // Only enter the lock when there is a successor segment to move to
    if (_headSegment.NextSegment != null)
    {
        lock (_segmentOperationsLock)
        {
            result = _headSegment; // Retry inside lock
            if (!result.IsCompleted)
            {
                return (result);
            }
            // Search for not completed segment
            result = MoveHeadToNonCompletedSegment();
            if (!result.IsCompleted)
            {
                // Perform compaction
                // Force compaction when we reach limit of available segments
                // (otherwise the background compaction thread handles it)
                if ((!IsBackgroundCompactionEnabled || _segments.Count == _maxSegmentCount) && _headSegment != _segments[0])
                {
                    Compact(allowNotification: false);
                }
                return (result);
            }
        }
    }
    // Every segment is completed (or the head is the last segment and completed)
    return (null);
}
/// <summary>
/// DiskQueue constructor
/// </summary>
/// <param name="path">Path to the folder on the disk to store queue segments</param>
/// <param name="segmentFactory">Factory to create DiskQueueSegments</param>
/// <param name="maxSegmentCount">Maximum number of simultaneously active segments (negative value = use the maximum supported count)</param>
/// <param name="backgroundCompaction">Is background compaction allowed (if not then compaction happens synchronously within the Take operation)</param>
/// <param name="compactionPeriod">Compaction period in milliseconds</param>
internal DiskQueue(string path, DiskQueueSegmentFactory<T> segmentFactory, int maxSegmentCount, bool backgroundCompaction, int compactionPeriod)
{
    if (string.IsNullOrEmpty(path))
        throw new ArgumentNullException(nameof(path));
    if (segmentFactory == null)
        throw new ArgumentNullException(nameof(segmentFactory));
    if (compactionPeriod <= 0)
        throw new ArgumentOutOfRangeException(nameof(compactionPeriod), "Compaction period should be positive");
    if (maxSegmentCount == 0 || maxSegmentCount == 1)
        throw new ArgumentOutOfRangeException(nameof(maxSegmentCount), "At least two segments should be available");
    if (maxSegmentCount > int.MaxValue / 4)
        throw new ArgumentOutOfRangeException(nameof(maxSegmentCount), "Segment count is too large");

    // Negative values pass the checks above and are treated as 'no explicit limit'
    if (maxSegmentCount <= 0 || maxSegmentCount > int.MaxValue / 4)
        maxSegmentCount = int.MaxValue / 4;

    _addMonitor = new MonitorObject("DiskQueue.AddMonitor");
    _takeMonitor = new MonitorObject("DiskQueue.TakeMonitor");
    _peekMonitor = new MonitorObject("DiskQueue.PeekMonitor");

    _segmentFactory = segmentFactory;
    _segmentOperationsLock = new object();
    _maxSegmentCount = maxSegmentCount;
    _segmentsPath = path;

    _itemCount = 0;
    _boundedCapacity = -1;
    if (_segmentFactory.SegmentCapacity > 0)
        _boundedCapacity = (long)_segmentFactory.SegmentCapacity * maxSegmentCount;

    _nonFairItemThreshold = Environment.ProcessorCount;
    _nonFairSegmentThreshold = _segmentFactory.SegmentCapacity > 0 ?
        Math.Max(1, (8 * Environment.ProcessorCount) / _segmentFactory.SegmentCapacity) : 16;

    // Restore previously persisted segments from disk (ordered by segment number)
    var discoveredSegments = segmentFactory.DiscoverSegmentsWrapped(path);
    _segments = new CircularList<DiskQueueSegmentWrapper<T>>(discoveredSegments.OrderBy(o => o.Number));
    if (_segments.Count > 0)
    {
        for (int i = 0; i < _segments.Count; i++)
        {
            if (i + 1 < _segments.Count)
            {
                _segments[i].NextSegment = _segments[i + 1]; // Build linked-list
                // Adjacent comparison is sufficient because the list is sorted by Number
                if (_segments[i].Number == _segments[i + 1].Number)
                    throw new InvalidOperationException("DiscoverSegments returned duplicated segment numbers");
            }
            _itemCount += _segments[i].Count;
        }

        _headSegment = _segments[0];
        _tailSegment = _segments[_segments.Count - 1];
        _lastSegmentNumber = _tailSegment.Number;

        if (_tailSegment.IsFull)
        {
            // Allocate new segment when tail is Full (prevent write modifications of segments from previous run).
            // FIX: use checked increment and validate the factory result, consistent with AllocateNewSegment
            var newTailSegment = segmentFactory.CreateSegmentWrapped(path, checked(++_lastSegmentNumber));
            if (newTailSegment == null)
                throw new InvalidOperationException("CreateSegment returned null");
            _tailSegment.NextSegment = newTailSegment;
            _tailSegment = newTailSegment;
            _segments.Add(newTailSegment);
        }
    }
    else
    {
        // Allocate new segment.
        // FIX: validate the factory result, consistent with AllocateNewSegment
        _headSegment = _tailSegment = segmentFactory.CreateSegmentWrapped(path, checked(++_lastSegmentNumber));
        if (_tailSegment == null)
            throw new InvalidOperationException("CreateSegment returned null");
        _segments.Add(_tailSegment);
    }

    _compactionPeriod = compactionPeriod;
    if (backgroundCompaction)
    {
        _backgroundCompactionThread = new DelegateThreadSetManager(1, this.GetType().GetCSName() + "_" + this.GetHashCode().ToString() + " Background compaction", BackgroundCompactionProc);
        _backgroundCompactionThread.IsBackground = true;
        _backgroundCompactionThread.Start();
    }

    _isDisposed = false;
}