/// <summary>
/// Creates a segment that stores data in local memory array byte buffers
/// </summary>
internal override DefaultPileBase._segment MakeSegment(int segmentNumber)
{
  var memory = new LocalMemory(SegmentSize);
  var result = new _segment(this, memory, true);
  return result;
}
private bool advanceBuffer()
{
  var segs = m_Pile.m_Segments;
  if (segs.Count == 0) return false;

  m_BufferIdx = -1;
  m_Buffer.Clear();

  while (true)
  {
    //find the next live (non-null, non-deleted) segment
    _segment seg = null;
    while (m_SegmentIdx < segs.Count)
    {
      seg = segs[m_SegmentIdx];
      if (seg == null || Thread.VolatileRead(ref seg.DELETED) != 0)
      {
        seg = null; //do not fall out of the loop still holding a deleted segment
        m_SegmentIdx++;
        m_Address = 0;
        continue;
      }
      break;
    }

    if (seg == null) return m_Buffer.Count > 0; //no more live segments

    if (!m_Pile.getReadLock(seg)) return false; //the pile is shutting down

    try
    {
      var eof = crawlSegment(seg);
      if (eof)
      {
        m_SegmentIdx++;
        m_Address = 0;
        continue; //segment exhausted, move on to the next one
      }
      return true; //buffer filled mid-segment
    }
    finally
    {
      m_Pile.releaseReadLock(seg);
    }
  }
}
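// The enumerator that consumes this batch is outside this section. Below is a
// minimal sketch of a hypothetical MoveNext over m_Buffer/m_BufferIdx, assuming
// only the semantics visible above (advanceBuffer clears the buffer, resets
// m_BufferIdx to -1, and returns true only when entries are available):
public bool MoveNext()
{
  m_BufferIdx++;
  if (m_BufferIdx < m_Buffer.Count) return true; //still inside the current batch

  if (!advanceBuffer()) return false; //refill; enumeration is over when it fails

  m_BufferIdx = 0;
  return true; //advanceBuffer returning true guarantees at least one entry
}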
private bool crawlSegment(_segment seg) //must be called under segment read lock
{
  const int BUFFER_SIZE = 1024;

  var addr = 0; //start crawling from the very first byte to avoid segment corruption, as the previous address may have been deleted
  while (addr < seg.Data.Length - CHUNK_HDER_SZ)
  {
    var add = addr >= m_Address; //only report chunks at or past the resume point
    var chunkStartAdr = addr;

    var flag = seg.Data.ReadChunkFlag(addr);
    if (flag == ChunkFlag.Wrong) //todo In future a corruption recovery attempt may take place if we scan from an 8-aligned block
      throw new PileException(StringConsts.PILE_CRAWL_INTERNAL_SEGMENT_CORRUPTION_ERROR.Args(addr));

    addr += 3; //skip the 3-byte chunk flag
    var payloadSize = seg.Data.ReadInt32(addr);
    addr += 4;
    var sver = seg.Data.ReadByte(addr);
    addr++; //read serializer version
    addr += payloadSize; //skip the body of payload

    if (!add) continue;

    m_Address = addr; //next chunk

    if (flag == ChunkFlag.Used)
    {
      var ptr = new PilePointer(m_SegmentIdx, chunkStartAdr);
      var dt = sver == SVER_UTF8 ? PileEntry.DataType.String :
               sver == SVER_BUFF ? PileEntry.DataType.Buffer :
               sver == SVER_LINK ? PileEntry.DataType.Link :
                                   PileEntry.DataType.Object;
      var entry = new PileEntry(ptr, dt, payloadSize);
      m_Buffer.Add(entry);
      if (m_Buffer.Count == BUFFER_SIZE)
        return m_Address >= seg.Data.Length - CHUNK_HDER_SZ; //report eof if nothing can follow the last buffered chunk
    }
  }//while

  return true; //eof
}
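// For reference, crawlSegment above implies this chunk layout: a 3-byte chunk
// flag, a 4-byte payload size, and a 1-byte serializer version (3+4+1 = 8 bytes,
// matching CHUNK_HDER_SZ), followed by the payload. The stand-alone decoder
// below is a sketch only; the little-endian order of the size field is an
// assumption, as the real ReadInt32/ReadChunkFlag implementations are not shown here.
static (int payloadSize, byte serializerVersion, int nextChunkAddr) ReadChunkHeader(byte[] data, int addr)
{
  var p = addr + 3; //skip the 3-byte chunk flag, whose encoding is opaque here
  var payloadSize = data[p] | (data[p+1] << 8) | (data[p+2] << 16) | (data[p+3] << 24);
  p += 4;
  var sver = data[p++]; //serializer version byte
  return (payloadSize, sver, p + payloadSize); //the next chunk starts right after the payload
}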
/// <summary>
/// Puts a CLR object into the pile and returns a newly-allocated pointer.
/// Throws out-of-space if there is not enough space in the pile and limits are set.
/// The optional lifeSpanSec is ignored by this implementation
/// </summary>
public PilePointer Put(object obj, uint lifeSpanSec = 0)
{
  if (!Running) return PilePointer.Invalid;
  if (obj == null) throw new PileException(StringConsts.ARGUMENT_ERROR + GetType().Name + ".Put(obj==null)");

  Interlocked.Increment(ref m_stat_PutCount);

  //1 serialize to determine the size
  int serializedSize;
  byte serializerVersion;
  var buffer = serialize(obj, out serializedSize, out serializerVersion);
  var payloadSize = IntMath.Align8(serializedSize);
  var chunkSize = CHUNK_HDER_SZ + payloadSize;

  if (chunkSize > m_SegmentSize)
    throw new PileOutOfSpaceException(StringConsts.PILE_OBJECT_LARGER_SEGMENT_ERROR.Args(payloadSize));

  //2 try to allocate within one of the existing segments
  while (true)
  {
    if (!Running) return PilePointer.Invalid;

    var segs = m_Segments;
    for (var idxSegment = 0; idxSegment < segs.Count; idxSegment++)
    {
      var seg = segs[idxSegment];
      if (seg == null) continue;
      if (seg.DELETED != 0) continue;

      if (seg.FreeCapacity > chunkSize)
      {
        if (!getWriteLock(seg)) return PilePointer.Invalid;
        try
        {
          //re-check under the lock: the segment may have been deleted or filled in the meantime
          if (Thread.VolatileRead(ref seg.DELETED) == 0 && seg.FreeCapacity > chunkSize)
          {
            var adr = seg.Allocate(buffer, payloadSize, serializerVersion);
            if (adr >= 0) return new PilePointer(idxSegment, adr); //allocated before crawl

            var utcNow = DateTime.UtcNow;
            if ((utcNow - seg.LastCrawl).TotalSeconds > (m_AllocMode == AllocationMode.FavorSpeed ? 30 : 5))
            {
              //could not fit, try to reclaim deleted chunks
              seg.Crawl();
              seg.LastCrawl = utcNow;

              //try again
              adr = seg.Allocate(buffer, payloadSize, serializerVersion);
              if (adr >= 0) return new PilePointer(idxSegment, adr); //allocated after crawl
            }
            //if we are here - still could not allocate, will try the next segment in iteration
          }
        }
        finally
        {
          releaseWriteLock(seg);
        }
      }
    }//for

    //if we are here, we could not allocate space in any of the existing segments
    if (m_AllocMode == AllocationMode.FavorSpeed) break;

    var nsegs = m_Segments;
    if (segs.Count >= nsegs.Count) break; //only repeat the scan if the segment list has grown, as more space may have become available
  }//while

  //3 allocate a new segment
  lock (m_SegmentsLock)
  {
    if ((m_MaxMemoryLimit > 0 && AllocatedMemoryBytes + m_SegmentSize > m_MaxMemoryLimit) ||
        (m_MaxSegmentLimit > 0 && (m_Segments.Count(s => s != null) + 1) > m_MaxSegmentLimit))
      throw new PileOutOfSpaceException(StringConsts.PILE_OUT_OF_SPACE_ERROR.Args(m_MaxMemoryLimit, m_MaxSegmentLimit, m_SegmentSize));

    var newSeg = new _segment(this);
    var newSegs = new List<_segment>(m_Segments);
    newSegs.Add(newSeg);

    var adr = newSeg.Allocate(buffer, payloadSize, serializerVersion);
    var pp = new PilePointer(newSegs.Count - 1, adr);

    m_Segments = newSegs;
    return pp;
  }
}
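// IntMath.Align8 above rounds the serialized size up to the next multiple of 8
// so that every chunk occupies a whole number of 8-byte slots. Its presumed
// behavior (a sketch, not necessarily the actual implementation):
static int Align8(int size) => (size + 7) & ~7;
// e.g. Align8(1) == 8, Align8(8) == 8, Align8(13) == 16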
private void releaseWriteLock(_segment segment)
{
  segment.RWSynchronizer.ReleaseWriteLock();
}
//the writer lock admits only one writer at a time; a writer conflicts with any reader
private bool getWriteLock(_segment segment)
{
  return segment.RWSynchronizer.GetWriteLock((_) => !this.Running);
}
private void releaseWriteLock(_segment segment)
{
  //write 0 to clear the long.MinValue writer sentinel along with any increments made by readers that spun while the writer held the lock
  Thread.VolatileWrite(ref segment.LCK_READERS, 0L);
}
//the writer lock admits only one writer at a time; a writer conflicts with any reader
private bool getWriteLock(_segment segment)
{
  long spinCount = 0;
  var tightWait = Interlocked.Increment(ref segment.LCK_WAITING_WRITERS) < CPU_COUNT;

  //a writer claims the segment by swapping LCK_READERS from 0 (no readers) to long.MinValue
  while (Interlocked.CompareExchange(ref segment.LCK_READERS, long.MinValue, 0) != 0)
  {
    if (tightWait)
    {
      if (spinCount < 10000)
      {
        Thread.SpinWait(500);
        spinCount++;
        continue;
      }
      if (spinCount < 12000)
      {
        if (!Thread.Yield()) Thread.SpinWait(1000);
        spinCount++;
        continue;
      }
    }

    if (!this.Running)
    {
      Interlocked.Decrement(ref segment.LCK_WAITING_WRITERS);
      return false; //lock failed
    }

    Thread.Sleep(10 + (Thread.CurrentThread.GetHashCode() & 0xf)); //sleep with per-thread jitter to break up convoys
    spinCount++;
  }

  Interlocked.Decrement(ref segment.LCK_WAITING_WRITERS);
  return true; //lock taken
}
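// The protocol above packs both lock roles into one long: LCK_READERS holds the
// reader count while non-negative, and a writer claims the segment by CAS-ing it
// from 0 to long.MinValue, so any reader increment made in the meantime still
// comes out negative. Below is a self-contained reduction of the same idea, with
// the segment fields, waiting-writer accounting and spin tuning stripped out;
// it is an illustration only, not the pile's actual synchronizer:
sealed class SketchRwLock
{
  private long m_Readers; //0 = free, >0 = reader count, <0 = writer holds

  public void EnterWrite()
  {
    //claim exclusivity only when no readers are in
    while (Interlocked.CompareExchange(ref m_Readers, long.MinValue, 0) != 0)
      Thread.SpinWait(100);
  }

  public void ExitWrite()
  {
    //wipe the sentinel along with any increments left by readers that spun while we held the lock
    Interlocked.Exchange(ref m_Readers, 0);
  }

  public void EnterRead()
  {
    //a positive result proves no writer is in; a negative one means we incremented
    //on top of long.MinValue and must keep trying
    while (Interlocked.Increment(ref m_Readers) <= 0)
      Thread.SpinWait(100);
  }

  public void ExitRead()
  {
    Interlocked.Decrement(ref m_Readers);
  }
}
// The asymmetry keeps the uncontended reader fast path down to a single
// Interlocked.Increment, which is what keeps concurrent reads cheap.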
private void releaseReadLock(_segment segment)
{
  Interlocked.Decrement(ref segment.LCK_READERS);
}
//the reader lock allows many concurrent readers but only one writer
private bool getReadLock(_segment segment)
{
  //While a writer holds the lock, LCK_READERS sits at long.MinValue and every waiting reader keeps incrementing it.
  //Since there are 2^63 increments between long.MinValue and ZERO, even if the system had 1000 real threads that all
  //physically execute at the same time (will never happen), at 100 wake-ups a second (10 ms sleeps) that is
  //1000 threads * 100 wake-ups * 60 sec = 6,000,000 increments a minute in the worst case (out of 2^63),
  //which means that LCK_READERS would take around 3 million years to reach ZERO
  long spinCount = 0;
  while (Interlocked.Increment(ref segment.LCK_READERS) <= 0) //non-positive means a writer holds the lock
  {
    if (spinCount < 12000)
    {
      var tightWait = Thread.VolatileRead(ref segment.LCK_WAITING_WRITERS) < CPU_COUNT;
      if (tightWait)
      {
        if (spinCount > 10000)
        {
          if (!Thread.Yield()) Thread.SpinWait(1000);
        }
        else
          Thread.SpinWait(500);
        spinCount++;
        continue;
      }
    }

    //severe contention
    if (!this.Running) return false; //lock failed

    Thread.Sleep(10 + (Thread.CurrentThread.GetHashCode() & 0xf)); //per-thread jitter to break up convoys
    spinCount++;
  }//while

  return true; //lock taken
}
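// Worked check of the headroom figure cited above, assuming the same worst case
// (1000 threads each waking 100 times a second); a sketch for illustration only:
static double YearsToClimbLongRange()
{
  long perMinute = 1000L * 100 * 60;                             //6,000,000 increments a minute
  return (double)long.MaxValue / perMinute / (60 * 24 * 365.25); //≈ 2.9e6, i.e. around 3 million years
}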