internal void pack(Page pg, int i)
{
    byte[] dst = pg.data;
    // Fixed-size keys grow forward from firstKeyOffs, each at an offset
    // proportional to its size; the 4-byte OID for slot i is stored at the
    // opposite end of the item area (index maxItems - i - 1).
    switch (key.type)
    {
        case ClassDescriptor.FieldType.tpBoolean:
        case ClassDescriptor.FieldType.tpSByte:
        case ClassDescriptor.FieldType.tpByte:
            dst[BtreePage.firstKeyOffs + i] = (byte)key.ival;
            break;
        case ClassDescriptor.FieldType.tpShort:
        case ClassDescriptor.FieldType.tpUShort:
        case ClassDescriptor.FieldType.tpChar:
            Bytes.pack2(dst, BtreePage.firstKeyOffs + i * 2, (short)key.ival);
            break;
        case ClassDescriptor.FieldType.tpInt:
        case ClassDescriptor.FieldType.tpUInt:
        case ClassDescriptor.FieldType.tpEnum:
        case ClassDescriptor.FieldType.tpObject:
        case ClassDescriptor.FieldType.tpOid:
            Bytes.pack4(dst, BtreePage.firstKeyOffs + i * 4, key.ival);
            break;
        case ClassDescriptor.FieldType.tpLong:
        case ClassDescriptor.FieldType.tpULong:
        case ClassDescriptor.FieldType.tpDate:
            Bytes.pack8(dst, BtreePage.firstKeyOffs + i * 8, key.lval);
            break;
        case ClassDescriptor.FieldType.tpFloat:
            Bytes.packF4(dst, BtreePage.firstKeyOffs + i * 4, (float)key.dval);
            break;
        case ClassDescriptor.FieldType.tpDouble:
            Bytes.packF8(dst, BtreePage.firstKeyOffs + i * 8, key.dval);
            break;
        case ClassDescriptor.FieldType.tpDecimal:
            Bytes.packDecimal(dst, BtreePage.firstKeyOffs + i * 16, key.dec);
            break;
        case ClassDescriptor.FieldType.tpGuid:
            Bytes.packGuid(dst, BtreePage.firstKeyOffs + i * 16, key.guid);
            break;
        default:
            Debug.Assert(false, "Invalid type");
            break;
    }
    // Object identifier paired with slot i.
    Bytes.pack4(dst, BtreePage.firstKeyOffs + (BtreePage.maxItems - i - 1) * 4, oid);
}
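// The sketch below is not part of the library; it is a minimal, hypothetical
// illustration of the slot arithmetic used by pack() above: keys grow forward
// from firstKeyOffs while the 4-byte OIDs fill the same item area from the
// opposite end, so key i and OID i never collide while i < maxItems. The
// class name and constants are assumptions chosen only for this example.
static class BtreeSlotLayoutSketch
{
    const int firstKeyOffs = 0;   // assumed base offset of the item area
    const int maxItems = 128;     // assumed capacity of a page

    // Offset of the key stored in slot i, for a key of the given fixed size.
    static int KeyOffset(int i, int keySize) => firstKeyOffs + i * keySize;

    // Offset of the OID paired with slot i (filled from the end of the area).
    static int OidOffset(int i) => firstKeyOffs + (maxItems - i - 1) * 4;
}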
public void watchdog()
{
    lock (mutex)
    {
        while (!shutdown)
        {
            // Wake up periodically (or when Close() pulses the mutex) and ping
            // all replicas so that slaves can detect a dead master.
            Monitor.Wait(mutex, storage.replicationReceiveTimeout);
            if (!shutdown)
            {
                Bytes.pack8(txBuf, 0, ReplicationSlaveStorageImpl.REPL_PING);
                for (int i = 0; i < replicas.Length; i++)
                {
                    Send(i, txBuf);
                }
            }
        }
    }
}
public virtual void Write(long pos, byte[] buf)
{
    lock (mutex)
    {
        if (pageTimestamps != null)
        {
            int pageNo = (int)(pos >> Page.pageSizeLog);
            if (pageNo >= pageTimestamps.Length)
            {
                // Grow the per-page timestamp array (at least doubling it) and the
                // bitmap tracking which pages of the timestamp file were modified.
                int newLength = pageNo >= pageTimestamps.Length * 2
                    ? pageNo + 1
                    : pageTimestamps.Length * 2;
                int[] newPageTimestamps = new int[newLength];
                Array.Copy(pageTimestamps, 0, newPageTimestamps, 0, pageTimestamps.Length);
                pageTimestamps = newPageTimestamps;
                int[] newDirtyPageTimestampMap =
                    new int[(((newLength * 4 + Page.pageSize - 1) >> Page.pageSizeLog) + 31) >> 5];
                Array.Copy(dirtyPageTimestampMap, 0, newDirtyPageTimestampMap, 0, dirtyPageTimestampMap.Length);
                dirtyPageTimestampMap = newDirtyPageTimestampMap;
            }
            pageTimestamps[pageNo] = ++timestamp;
            // Mark the timestamp-file page containing this entry as dirty.
            dirtyPageTimestampMap[pageNo >> (Page.pageSizeLog - 2 + 5)]
                |= 1 << ((pageNo >> (Page.pageSizeLog - 2)) & 31);
        }
        Bytes.pack8(txBuf, 0, pos);
        Array.Copy(buf, 0, txBuf, 8, buf.Length);
        if (pageTimestamps != null)
        {
            Bytes.pack4(txBuf, Page.pageSize + 8, timestamp);
        }
        // Replicate the page synchronously to all connected slaves.
        for (int i = 0; i < replicas.Length; i++)
        {
            if (Send(i, txBuf))
            {
                if (ack && pos == 0 && !Receive(i, rcBuf))
                {
                    Console.WriteLine("Failed to receive ACK from node " + replicas[i].host);
                }
            }
        }
    }
    file.Write(pos, buf);
}
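// A minimal sketch (not library code) of the bit-index arithmetic used above.
// It assumes a timestamp is a 4-byte int, so a page of the timestamp file holds
// pageSize / 4 = 1 << (pageSizeLog - 2) timestamps, and each bit of the dirty
// map covers one such page. Names and constants are assumptions made only for
// this illustration.
static class DirtyTimestampMapSketch
{
    const int pageSizeLog = 12;                       // assumed 4 KB pages
    const int timestampsPerPageLog = pageSizeLog - 2; // 4-byte timestamps per page

    static void MarkDirty(int[] map, int pageNo)
    {
        int tsPage = pageNo >> timestampsPerPageLog;  // timestamp-file page holding pageNo's entry
        map[tsPage >> 5] |= 1 << (tsPage & 31);       // one bit per timestamp-file page
    }
}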
public int packI8(int offs, long val)
{
    extend(offs + 8);
    Bytes.pack8(arr, offs, val);
    return offs + 8;
}
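// Hypothetical usage sketch (not from the library): packI8 returns the next
// free offset, so successive fields can be appended by threading the offset
// through the calls. The buffer variable and the values are assumptions made
// only for this example.
//
//     int offs = 0;
//     offs = buf.packI8(offs, recordId);
//     offs = buf.packI8(offs, timestampTicks);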
public override void Write(long pos, byte[] buf)
{
    file.Write(pos, buf);
    lock (mutex)
    {
        if (pageTimestamps != null)
        {
            int pageNo = (int)(pos >> Page.pageSizeLog);
            if (pageNo >= pageTimestamps.Length)
            {
                // Grow the timestamp array and the dirty-page bitmap, as in the
                // synchronous Write above.
                int newLength = pageNo >= pageTimestamps.Length * 2
                    ? pageNo + 1
                    : pageTimestamps.Length * 2;
                int[] newPageTimestamps = new int[newLength];
                Array.Copy(pageTimestamps, 0, newPageTimestamps, 0, pageTimestamps.Length);
                pageTimestamps = newPageTimestamps;
                int[] newDirtyPageTimestampMap =
                    new int[(((newLength * 4 + Page.pageSize - 1) >> Page.pageSizeLog) + 31) >> 5];
                Array.Copy(dirtyPageTimestampMap, 0, newDirtyPageTimestampMap, 0, dirtyPageTimestampMap.Length);
                dirtyPageTimestampMap = newDirtyPageTimestampMap;
            }
            pageTimestamps[pageNo] = ++timestamp;
            dirtyPageTimestampMap[pageNo >> (Page.pageSizeLog - 2 + 5)]
                |= 1 << ((pageNo >> (Page.pageSizeLog - 2)) & 31);
        }
    }
    for (int i = 0; i < replicas.Length; i++)
    {
        if (replicas[i].socket != null)
        {
            // Copy the page into a parcel and hand it to the background sender.
            byte[] data = new byte[txBuf.Length];
            Bytes.pack8(data, 0, pos);
            Array.Copy(buf, 0, data, 8, buf.Length);
            if (pageTimestamps != null)
            {
                Bytes.pack4(data, Page.pageSize + 8, timestamp);
            }
            Parcel p = new Parcel();
            p.data = data;
            p.pos = pos;
            p.host = i;
            // Throttle the producer: block while the buffered data exceeds
            // asyncBufSize, unless this parcel is the only one buffered.
            lock (async)
            {
                buffered += data.Length;
                while (buffered > asyncBufSize && buffered != data.Length)
                {
                    Monitor.Wait(async);
                }
            }
            // Append the parcel to the send queue and wake the sender thread.
            lock (go)
            {
                if (head == null)
                {
                    head = tail = p;
                }
                else
                {
                    tail = tail.next = p;
                }
                Monitor.Pulse(go);
            }
        }
    }
}
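// The sketch below is not the library's sender thread; it is a minimal,
// self-contained illustration of the consumer side that the producer code
// above implies: dequeue parcels under the queue lock, send them outside the
// lock, then decrement the buffered byte count and wake any writer blocked in
// the back-pressure wait. All names (SenderSketch, Item, SendOverNetwork) are
// assumptions for this example; System.Threading (Monitor) is assumed.
class SenderSketch
{
    class Item { public byte[] data; public Item next; }

    readonly object go = new object();     // queue lock / "work available" signal
    readonly object async = new object();  // guards the buffered byte counter
    Item head, tail;
    long buffered;
    volatile bool shutdown;                // assumed to be set elsewhere on Close()

    public void Run()
    {
        while (true)
        {
            Item p;
            lock (go)
            {
                while (head == null && !shutdown)
                {
                    Monitor.Wait(go);      // sleep until a parcel is queued
                }
                if (head == null)
                {
                    return;                // shutdown requested and queue drained
                }
                p = head;
                head = p.next;
                if (head == null)
                {
                    tail = null;
                }
            }
            SendOverNetwork(p.data);       // hypothetical network send
            lock (async)
            {
                buffered -= p.data.Length;
                Monitor.PulseAll(async);   // release writers blocked on back-pressure
            }
        }
    }

    void SendOverNetwork(byte[] data) { /* placeholder for the example */ }
}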
public virtual void Close()
{
    // Stop the watchdog thread.
    lock (mutex)
    {
        shutdown = true;
        Monitor.Pulse(mutex);
    }
    watchdogThread.Join();
    if (listenThread != null)
    {
        lock (mutex)
        {
            listening = false;
        }
        // The listener is blocked in Accept(); connect to the listen port
        // ourselves so that it wakes up, sees listening == false and exits.
        Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp);
#if NET_FRAMEWORK_20
        foreach (IPAddress ip in Dns.GetHostEntry("localhost").AddressList)
#else
        foreach (IPAddress ip in Dns.Resolve("localhost").AddressList)
#endif
        {
            try
            {
                s.Connect(new IPEndPoint(ip, port));
                s.Close();
            }
            catch (SocketException) {}
        }
        listenThread.Join();
        try
        {
            listenSocket.Close();
        }
        catch (SocketException) {}
    }
    // Wait for per-replica synchronization threads to finish.
    for (int i = 0; i < replicas.Length; i++)
    {
        Thread t = replicas[i].syncThread;
        if (t != null)
        {
            t.Join();
        }
    }
    file.Close();
    // Tell every connected slave that the master is shutting down.
    Bytes.pack8(txBuf, 0, ReplicationSlaveStorageImpl.REPL_CLOSE);
    for (int i = 0; i < replicas.Length; i++)
    {
        if (replicas[i].socket != null)
        {
            try
            {
                replicas[i].socket.Send(txBuf);
                replicas[i].socket.Close();
            }
            catch (SocketException) {}
        }
    }
    if (pageTimestampFile != null)
    {
        pageTimestampFile.Close();
    }
}
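// A minimal, self-contained sketch (not the library's listener) of the
// shutdown technique used in Close() above: a thread blocked in Accept() is
// woken by a local connection, re-checks a flag under the lock and exits.
// All names here are assumptions for the illustration; listenSocket is assumed
// to be bound and listening before Run() is called.
class ListenerSketch
{
    readonly object mutex = new object();
    bool listening = true;
    Socket listenSocket;

    public void Run()
    {
        while (true)
        {
            Socket peer = listenSocket.Accept();   // blocks until a connection arrives
            lock (mutex)
            {
                if (!listening)
                {
                    peer.Close();                  // the wake-up connection from Close()
                    return;
                }
            }
            HandlePeer(peer);                      // hypothetical per-connection handling
        }
    }

    void HandlePeer(Socket peer) { /* placeholder for the example */ }
}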
void synchronizeNewSlaveNode(int i)
{
    long size = storage.DatabaseSize;
    int[] syncNodeTimestamps = null;
    byte[] txBuf;
    if (pageTimestamps != null)
    {
        txBuf = new byte[12 + Page.pageSize];
        // Ask the slave for its page timestamps so that only stale pages are sent.
        byte[] psBuf = new byte[4];
        if (!Receive(i, psBuf))
        {
            Console.WriteLine("Failed to receive page timestamps length from slave node " + replicas[i].host);
            return;
        }
        int psSize = Bytes.unpack4(psBuf, 0);
        psBuf = new byte[psSize * 4];
        if (!Receive(i, psBuf))
        {
            Console.WriteLine("Failed to receive page timestamps from slave node " + replicas[i].host);
            return;
        }
        syncNodeTimestamps = new int[psSize];
        for (int j = 0; j < psSize; j++)
        {
            syncNodeTimestamps[j] = Bytes.unpack4(psBuf, j * 4);
        }
    }
    else
    {
        txBuf = new byte[8 + Page.pageSize];
    }
    for (long pos = 0; pos < size; pos += Page.pageSize)
    {
        int pageNo = (int)(pos >> Page.pageSizeLog);
        if (syncNodeTimestamps != null)
        {
            // Skip pages whose timestamp on the slave matches the master's.
            if (pageNo < syncNodeTimestamps.Length
                && pageNo < pageTimestamps.Length
                && syncNodeTimestamps[pageNo] == pageTimestamps[pageNo])
            {
                continue;
            }
        }
        lock (storage)
        {
            lock (storage.objectCache)
            {
                Page pg = storage.pool.getPage(pos);
                Bytes.pack8(txBuf, 0, pos);
                Array.Copy(pg.data, 0, txBuf, 8, Page.pageSize);
                storage.pool.unfix(pg);
                if (syncNodeTimestamps != null)
                {
                    Bytes.pack4(txBuf, Page.pageSize + 8,
                                pageNo < pageTimestamps.Length ? pageTimestamps[pageNo] : 0);
                }
            }
        }
        if (!Send(i, txBuf))
        {
            return;
        }
        if (ack && pos == 0 && !Receive(i, rcBuf))
        {
            Console.WriteLine("Failed to receive ACK from node " + replicas[i].host);
            return;
        }
    }
    // All pages transferred: tell the slave that it is now synchronized.
    Bytes.pack8(txBuf, 0, ReplicationSlaveStorageImpl.REPL_SYNC);
    Send(i, txBuf);
}
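// A small sketch (not library code) of the filter synchronizeNewSlaveNode
// applies: a page is skipped only when both master and slave have a recorded
// timestamp for it and the two are equal; otherwise it is transferred. The
// method and parameter names are assumptions for the illustration.
static bool NeedsTransfer(int pageNo, int[] masterTimestamps, int[] slaveTimestamps)
{
    bool bothKnown = pageNo < masterTimestamps.Length && pageNo < slaveTimestamps.Length;
    return !(bothKnown && masterTimestamps[pageNo] == slaveTimestamps[pageNo]);
}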