Example #1
 /// <summary>
 /// DiskQueue constructor
 /// </summary>
 /// <param name="path">Path to the folder on the disk to store queue segments</param>
 /// <param name="segmentFactory">Factory to create DiskQueueSegments</param>
 public DiskQueue(string path, DiskQueueSegmentFactory<T> segmentFactory)
     : this(path, segmentFactory, -1, false)
 {
 }
Example #2
        /// <summary>
        /// DiskQueue constructor
        /// </summary>
        /// <param name="path">Path to the folder on the disk to store queue segments</param>
        /// <param name="segmentFactory">Factory to create DiskQueueSegments</param>
        /// <param name="maxSegmentCount">Maximum number of simultaniously active segments</param>
        /// <param name="backgroundCompaction">Is background compaction allowed (if not then compaction happens synchronously within the Take operation)</param>
        /// <param name="compactionPeriod">Compaction period in milliseconds</param>
        internal DiskQueue(string path, DiskQueueSegmentFactory<T> segmentFactory, int maxSegmentCount, bool backgroundCompaction, int compactionPeriod)
        {
            if (string.IsNullOrEmpty(path))
            {
                throw new ArgumentNullException(nameof(path));
            }
            if (segmentFactory == null)
            {
                throw new ArgumentNullException(nameof(segmentFactory));
            }
            if (compactionPeriod <= 0)
            {
                throw new ArgumentOutOfRangeException(nameof(compactionPeriod), "Compaction period should be positive");
            }
            if (maxSegmentCount == 0 || maxSegmentCount == 1)
            {
                throw new ArgumentOutOfRangeException(nameof(maxSegmentCount), "At least two segments should be available");
            }
            if (maxSegmentCount > int.MaxValue / 4)
            {
                throw new ArgumentOutOfRangeException(nameof(maxSegmentCount), "Segment count is too large");
            }

            // Negative maxSegmentCount means "unbounded": normalize it to the maximum supported value
            if (maxSegmentCount <= 0 || maxSegmentCount > int.MaxValue / 4)
            {
                maxSegmentCount = int.MaxValue / 4;
            }

            _addMonitor  = new MonitorObject("DiskQueue.AddMonitor");
            _takeMonitor = new MonitorObject("DiskQueue.TakeMonitor");
            _peekMonitor = new MonitorObject("DiskQueue.PeekMonitor");

            _segmentFactory        = segmentFactory;
            _segmentOperationsLock = new object();
            _maxSegmentCount       = maxSegmentCount;
            _segmentsPath          = path;

            _itemCount       = 0;
            _boundedCapacity = -1;
            if (_segmentFactory.SegmentCapacity > 0)
            {
                _boundedCapacity = (long)_segmentFactory.SegmentCapacity * maxSegmentCount;
            }

            _nonFairItemThreshold    = Environment.ProcessorCount;
            _nonFairSegmentThreshold = _segmentFactory.SegmentCapacity > 0 ? Math.Max(1, (8 * Environment.ProcessorCount) / _segmentFactory.SegmentCapacity) : 16;

            var discoveredSegments = segmentFactory.DiscoverSegmentsWrapped(path);

            _segments = new CircularList<DiskQueueSegmentWrapper<T>>(discoveredSegments.OrderBy(o => o.Number));
            if (_segments.Count > 0)
            {
                for (int i = 0; i < _segments.Count; i++)
                {
                    if (i + 1 < _segments.Count)
                    {
                        _segments[i].NextSegment = _segments[i + 1]; // Build linked-list
                        if (_segments[i].Number == _segments[i + 1].Number)
                        {
                            throw new InvalidOperationException("DiscoverSegments returned duplicated segment numbers");
                        }
                    }
                    _itemCount += _segments[i].Count;
                }

                _headSegment       = _segments[0];
                _tailSegment       = _segments[_segments.Count - 1];
                _lastSegmentNumber = _tailSegment.Number;

                if (_tailSegment.IsFull)
                {
                    // Allocate a new segment when the tail is full (prevents writes to segments left over from a previous run)
                    var newTailSegment = segmentFactory.CreateSegmentWrapped(path, ++_lastSegmentNumber);
                    _tailSegment.NextSegment = newTailSegment;
                    _tailSegment             = newTailSegment;
                    _segments.Add(newTailSegment);
                }
            }
            else
            {
                // No segments discovered: allocate the first one
                _headSegment = _tailSegment = segmentFactory.CreateSegmentWrapped(path, ++_lastSegmentNumber);
                _segments.Add(_tailSegment);
            }

            _compactionPeriod = compactionPeriod;
            if (backgroundCompaction)
            {
                _backgroundCompactionThread = new DelegateThreadSetManager(1, this.GetType().GetCSName() + "_" + this.GetHashCode().ToString() + " Background compaction", BackgroundCompactionProc);
                _backgroundCompactionThread.IsBackground = true;
                _backgroundCompactionThread.Start();
            }

            _isDisposed = false;
        }
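
A quick, self-contained sketch of the bounded-capacity rule used in the constructor above. The numbers are assumed for illustration; in the real code SegmentCapacity is reported by the segment factory. A positive segment capacity bounds the queue at SegmentCapacity * maxSegmentCount items, while a non-positive capacity leaves the queue unbounded (signalled by -1).

 // Standalone illustration of the capacity rule (assumed values, not library code)
 int segmentCapacity = 1000;   // items per segment, as reported by the factory
 int maxSegmentCount = 16;     // limit on simultaneously active segments
 long boundedCapacity = segmentCapacity > 0
     ? (long)segmentCapacity * maxSegmentCount   // bounded: 16000 items in total
     : -1;                                       // -1 means "unbounded"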
Example #3
 /// <summary>
 /// DiskQueue constructor
 /// </summary>
 /// <param name="path">Path to the folder on the disk to store queue segments</param>
 /// <param name="segmentFactory">Factory to create DiskQueueSegments</param>
 /// <param name="maxSegmentCount">Maximum number of simultaniously active segments</param>
 /// <param name="backgroundCompaction">Is background compaction allowed (if not then compaction happens synchronously within the Take operation)</param>
 public DiskQueue(string path, DiskQueueSegmentFactory <T> segmentFactory, int maxSegmentCount, bool backgroundCompaction)
     : this(path, segmentFactory, maxSegmentCount, backgroundCompaction, CompactionPeriodMs)
 {
 }
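
For reference, a minimal usage sketch of the public constructors above. SampleSegmentFactory<string> is a hypothetical stand-in for a concrete DiskQueueSegmentFactory<string> implementation (the real factory type is supplied by the library or by user code); only the constructor calls themselves follow the signatures shown in the examples.

 // Hypothetical factory; any DiskQueueSegmentFactory<string> implementation would do here
 var segmentFactory = new SampleSegmentFactory<string>();

 // Example #1: segment count is unbounded, compaction runs synchronously inside Take
 var simpleQueue = new DiskQueue<string>(@"C:\data\queue", segmentFactory);

 // Example #3: at most 16 active segments, compaction runs on a background thread
 var boundedQueue = new DiskQueue<string>(@"C:\data\queue", segmentFactory, 16, true);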