/// <summary>
/// Serializes a message and broadcasts it to the given chat groups through the server core.
/// </summary>
/// <param name="Message">The object to serialize and publish.</param>
/// <param name="Groups">Target chat group identifiers; null is treated as no groups.</param>
/// <returns>True if the server core accepted the publish call.</returns>
public static bool Publish(object Message, params uint[] Groups)
{
    uint len = (Groups == null) ? 0 : (uint)Groups.Length;
    using (CScopeUQueue scope = new CScopeUQueue())
    {
        CUQueue queue = scope.UQueue;
        queue.Save(Message);
        unsafe
        {
            // pin both the serialized payload and the group array before
            // handing raw pointers to the native server core
            fixed (byte* pData = queue.m_bytes)
            fixed (uint* pGroups = Groups)
            {
                return ServerCoreLoader.SpeakPush(pData, queue.GetSize(), pGroups, len);
            }
        }
    }
}
/// <summary>
/// Handles a chunk of stream data pushed from the server, appending it to the
/// local stream and reporting progress. Returns true when the chunk was consumed.
/// </summary>
private bool DataFromServerToClient(CAsyncServiceHandler sender, ushort reqId, CUQueue qData)
{
    // only one request id is handled here; anything else is left for other handlers
    if (reqId != CStreamSerializationHelper.idReadDataFromServerToClient)
        return false;
    if (qData.GetSize() == 0)
        return false;
    lock (m_cs)
    {
        CStreamSerializationHelper.Write(m_s, qData);
        if (Progress != null)
            Progress.Invoke(this, (ulong)m_s.Position);
    }
    qData.SetSize(0); // chunk fully consumed
    return true;
}
// Appends one message (id + length-prefixed payload) to a batch buffer q.
// The first 4 bytes of the batch hold a running count of batched messages,
// which is incremented in place for every message after the first.
public static void BatchMessage(ushort idMessage, byte[] message, uint len, CUQueue q)
{
    // normalize a null payload to empty, and clamp len to the actual array size
    if (message == null)
    {
        message = new byte[0];
        len = 0;
    }
    else if (len > message.Length)
    {
        len = (uint)message.Length;
    }
    if (q.GetSize() == 0)
    {
        // first message in the batch: write the initial count
        uint count = 1;
        q.Save(count);
    }
    else
    {
        unsafe
        {
            // bump the message count stored at the front of the buffer in place
            // NOTE(review): this assumes the queue's head position is 0 here
            // (IntenalBuffer is indexed from 0, not from HeadPosition) — confirm
            fixed (byte* p = q.IntenalBuffer)
            {
                uint* pN = (uint*)p;
                *pN += 1;
            }
        }
    }
    // message layout: ushort id, uint length, then the raw payload bytes
    q.Save(idMessage).Save(len);
    q.Push(message, len);
}
/// <summary>
/// Sends a large text value to the peer as a BSTR-tagged blob stream.
/// </summary>
private bool PushText(string text)
{
    using (CScopeUQueue scope = new CScopeUQueue())
    {
        CUQueue buf = scope.UQueue;
        buf.Push(text);
        return PushBlob(buf.IntenalBuffer, buf.GetSize(), (ushort)tagVariantDataType.sdVT_BSTR);
    }
}
/// <summary>
/// Serializes one value and sends it as an asynchronous request.
/// </summary>
/// <returns>A task that completes with the server's response buffer.</returns>
public Task <CScopeUQueue> sendRequest <T0>(ushort reqId, T0 t0)
{
    using (CScopeUQueue scope = new CScopeUQueue())
    {
        CUQueue buf = scope.UQueue;
        buf.Save(t0);
        // the buffer is copied by the underlying send before this method returns,
        // so disposing the scope here is safe
        return sendRequest(reqId, buf.IntenalBuffer, buf.GetSize());
    }
}
/// <summary>
/// Serializes four values in order and sends them as one asynchronous request.
/// </summary>
/// <returns>A task that completes with the server's response buffer.</returns>
public Task <CScopeUQueue> sendRequest <T0, T1, T2, T3>(ushort reqId, T0 t0, T1 t1, T2 t2, T3 t3)
{
    using (CScopeUQueue scope = new CScopeUQueue())
    {
        CUQueue buf = scope.UQueue;
        buf.Save(t0).Save(t1).Save(t2).Save(t3);
        // the buffer is copied by the underlying send before this method returns
        return sendRequest(reqId, buf.IntenalBuffer, buf.GetSize());
    }
}
/// <summary>
/// Serializes eight values in order and sends them as one asynchronous request.
/// </summary>
/// <returns>A task that completes with the server's response buffer.</returns>
public Task <CScopeUQueue> sendRequest <T0, T1, T2, T3, T4, T5, T6, T7>(ushort reqId, T0 t0, T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7)
{
    using (CScopeUQueue scope = new CScopeUQueue())
    {
        CUQueue buf = scope.UQueue;
        buf.Save(t0).Save(t1).Save(t2).Save(t3).Save(t4).Save(t5).Save(t6).Save(t7);
        // the buffer is copied by the underlying send before this method returns
        return sendRequest(reqId, buf.IntenalBuffer, buf.GetSize());
    }
}
/// <summary>
/// Drains the current request queue, deserializing each value into the
/// accumulated parameter list.
/// </summary>
private void Transferring()
{
    CUQueue q = UQueue;
    object vt;
    while (q.GetSize() > 0)
    {
        q.Load(out vt);
        m_vParam.Add(vt);
    }
}
/// <summary>
/// Enqueues a pre-assembled batch of messages (built with BatchMessage) into a
/// server-side persistent queue identified by key.
/// </summary>
/// <param name="key">Queue key; null is treated as an empty key.</param>
/// <param name="q">Batch buffer; must contain at least the count header plus one entry.</param>
/// <param name="e">Callback invoked with the enqueue result.</param>
/// <param name="discarded">Callback invoked if the request is discarded.</param>
/// <returns>True if the request was sent (or queued) successfully.</returns>
/// <exception cref="InvalidOperationException">Thrown when q is null or too small to be a valid batch.</exception>
public virtual bool EnqueueBatch(byte[] key, CUQueue q, DEnqueue e, DDiscarded discarded)
{
    // Fix: normalize a null key to an empty key, consistent with the
    // EnqueueBatch(key, q, e) overload; the original passed null straight
    // into Save here while the sibling overload guarded against it.
    if (key == null) key = new byte[0];
    // a valid batch carries at least a 4-byte count plus a 4-byte entry header
    if (q == null || q.GetSize() < 2 * sizeof(uint))
    {
        throw new InvalidOperationException("Bad operation");
    }
    CUQueue sb = CScopeUQueue.Lock();
    sb.Save(key).Push(q.IntenalBuffer, q.HeadPosition, q.Size);
    q.SetSize(0); // batch consumed
    bool ok = SendRequest(idEnqueueBatch, sb, GetRH(e), discarded, (DOnExceptionFromServer)null);
    CScopeUQueue.Unlock(sb);
    return ok;
}
/// <summary>
/// Enqueues a pre-assembled batch of messages into a server-side persistent
/// queue identified by key; convenience overload without a discard callback.
/// </summary>
/// <exception cref="InvalidOperationException">Thrown when q is null or too small to be a valid batch.</exception>
public bool EnqueueBatch(byte[] key, CUQueue q, DEnqueue e)
{
    if (key == null)
    {
        key = new byte[0]; // treat a null key as an empty key
    }
    // a valid batch carries at least a 4-byte count plus a 4-byte entry header
    if (q == null || q.GetSize() < 2 * sizeof(uint))
    {
        throw new InvalidOperationException("Bad operation");
    }
    CUQueue sb = CScopeUQueue.Lock();
    sb.Save(key).Push(q.IntenalBuffer, q.HeadPosition, q.Size);
    q.SetSize(0); // batch consumed
    bool ok = SendRequest(idEnqueueBatch, sb, GetRH(e));
    CScopeUQueue.Unlock(sb);
    return ok;
}
/// <summary>
/// Flushes the accumulated row buffer to the client, restarting the batch
/// window when enough bytes have been batched.
/// </summary>
/// <param name="q">Row buffer; cleared on success.</param>
/// <param name="transferring">True for an intermediate chunk, false for the final chunk.</param>
/// <returns>False when the socket closed or the request was canceled.</returns>
private bool SendRows(CUQueue q, bool transferring)
{
    bool flushBatch = BytesBatched >= DB_CONSTS.DEFAULT_RECORD_BATCH_SIZE;
    if (flushBatch)
        CommitBatching();
    uint sent = SendResult(transferring ? DB_CONSTS.idTransferring : DB_CONSTS.idEndRows, q.IntenalBuffer, q.GetSize());
    if (flushBatch)
        StartBatching();
    if (sent != q.GetSize())
        return false; // socket closed or request canceled
    q.SetSize(0);
    return true;
}
/// <summary>
/// Handles idSQLGetInfo by rebuilding the cached server-info map; all other
/// request ids are delegated to the base handler.
/// </summary>
protected override void OnResultReturned(ushort reqId, CUQueue mc)
{
    if (reqId != idSQLGetInfo)
    {
        base.OnResultReturned(reqId, mc);
        return;
    }
    lock (m_csDB)
    {
        m_mapInfo.Clear();
        ushort infoType;
        object infoValue;
        // the payload is a flat sequence of (type, value) pairs
        while (mc.GetSize() > 0)
        {
            mc.Load(out infoType).Load(out infoValue);
            m_mapInfo[infoType] = infoValue;
        }
    }
}
/// <summary>
/// Handles idDBUpdate notifications by deserializing the event fields and
/// raising DBEvent; all other request ids are delegated to the base handler.
/// </summary>
protected override void OnResultReturned(ushort reqId, CUQueue mc)
{
    if (reqId != idDBUpdate)
    {
        base.OnResultReturned(reqId, mc);
        return;
    }
    if (mc.GetSize() == 0)
        return;
    int dbEventType;
    string dbInstance, dbPath, tablePath;
    object idRow;
    mc.Load(out dbEventType).Load(out dbInstance).Load(out dbPath).Load(out tablePath).Load(out idRow);
    if (DBEvent != null)
    {
        DBEvent(this, (tagUpdateEvent)dbEventType, dbInstance, dbPath, tablePath, idRow);
    }
}
// Streams all rows of a SqlDataReader result set to the client. Each value is
// written as a (ushort type tag, payload) pair; oversized text/binary values
// are flushed out-of-band via PushText/PushBlob so a single huge field cannot
// bloat the row buffer. Returns false when the socket closed or the request
// was canceled mid-stream.
private bool PushRows(SqlDataReader reader, CDBColumnInfoArray vCol)
{
    using (CScopeUQueue sb = new CScopeUQueue())
    {
        CUQueue q = sb.UQueue;
        while (reader.Read())
        {
            // flush a full batch before starting the next row
            if (q.GetSize() >= DB_CONSTS.DEFAULT_RECORD_BATCH_SIZE && !SendRows(q, false))
            {
                return (false);
            }
            int col = 0;
            foreach (CDBColumnInfo info in vCol)
            {
                if (reader.IsDBNull(col))
                {
                    q.Save((ushort)tagVariantDataType.sdVT_NULL);
                    ++col;
                    continue;
                }
                switch (info.DataType)
                {
                    case tagVariantDataType.sdVT_BSTR:
                        if (info.DeclaredType == "xml")
                        {
                            string xml = reader.GetSqlXml(col).Value;
                            if (xml.Length <= DB_CONSTS.DEFAULT_BIG_FIELD_CHUNK_SIZE)
                            {
                                q.Save((ushort)info.DataType).Save(xml);
                            }
                            else
                            {
                                // too large for inline transfer: flush pending rows
                                // first so ordering is preserved, then stream the text
                                if (q.GetSize() != 0 && !SendRows(q, true)) { return (false); }
                                if (!PushText(xml)) { return (false); }
                            }
                        }
                        else if (info.DeclaredType == "datetimeoffset")
                        {
                            // sent as a string because the wire format has no
                            // native datetimeoffset representation
                            DateTimeOffset dto = reader.GetDateTimeOffset(col);
                            q.Save((ushort)info.DataType).Save(dto.ToString());
                        }
                        else if (info.ColumnSize == 0) // for example, case "time"
                        {
                            object obj = reader.GetValue(col);
                            q.Save((ushort)info.DataType).Save(obj.ToString());
                        }
                        else
                        {
                            string s = reader.GetString(col);
                            if (s.Length <= DB_CONSTS.DEFAULT_BIG_FIELD_CHUNK_SIZE)
                            {
                                q.Save((ushort)info.DataType).Save(s);
                            }
                            else // text, ntext, varchar(max), nvarchar(max)
                            {
                                if (q.GetSize() != 0 && !SendRows(q, true)) { return (false); }
                                if (!PushText(s)) { return (false); }
                            }
                        }
                        break;
                    case (tagVariantDataType.sdVT_UI1 | tagVariantDataType.sdVT_ARRAY):
                        {
                            SqlBinary bytes = reader.GetSqlBinary(col);
                            if (bytes.Length <= 2 * DB_CONSTS.DEFAULT_BIG_FIELD_CHUNK_SIZE)
                            {
                                q.Save((ushort)info.DataType).Save(bytes.Value);
                            }
                            else // image, varbinary(max) or file?
                            {
                                if (q.GetSize() != 0 && !SendRows(q, true)) { return (false); }
                                if (!PushBlob(bytes.Value, (uint)bytes.Length)) { return (false); }
                            }
                        }
                        break;
                    case tagVariantDataType.sdVT_I8:
                        q.Save((ushort)info.DataType).Save(reader.GetInt64(col));
                        break;
                    case tagVariantDataType.sdVT_I4:
                    case tagVariantDataType.sdVT_INT:
                        q.Save((ushort)info.DataType).Save(reader.GetInt32(col));
                        break;
                    case tagVariantDataType.sdVT_I2:
                        q.Save((ushort)info.DataType).Save(reader.GetInt16(col));
                        break;
                    case tagVariantDataType.sdVT_UI1:
                        q.Save((ushort)info.DataType).Save(reader.GetByte(col));
                        break;
                    case tagVariantDataType.sdVT_R4:
                        q.Save((ushort)info.DataType).Save(reader.GetFloat(col));
                        break;
                    case tagVariantDataType.sdVT_R8:
                        q.Save((ushort)info.DataType).Save(reader.GetDouble(col));
                        break;
                    case tagVariantDataType.sdVT_BOOL:
                        // booleans are sent as VARIANT_BOOL: -1 for true, 0 for false
                        q.Save((ushort)info.DataType);
                        if (reader.GetBoolean(col))
                        {
                            q.Save((short)-1);
                        }
                        else
                        {
                            q.Save((short)0);
                        }
                        break;
                    case tagVariantDataType.sdVT_DATE:
                        q.Save((ushort)info.DataType).Save(reader.GetDateTime(col));
                        break;
                    case tagVariantDataType.sdVT_DECIMAL:
                        q.Save((ushort)info.DataType).Save(reader.GetDecimal(col));
                        break;
                    case tagVariantDataType.sdVT_CLSID:
                        q.Save((ushort)info.DataType).Save(reader.GetGuid(col));
                        break;
                    case tagVariantDataType.sdVT_VARIANT:
                        // Save(object) writes its own type tag
                        q.Save(reader.GetValue(col));
                        break;
                    default:
                        // NOTE(review): unknown column types are silently skipped,
                        // which shifts subsequent values in the row — confirm this
                        // case is unreachable for supported schemas
                        break;
                }
                ++col;
            }
        }
        // flush whatever remains after the last row
        uint ret = SendResult(DB_CONSTS.idEndRows, q.IntenalBuffer, q.GetSize());
        if (ret != q.GetSize())
        {
            return (false); // socket closed or request canceled
        }
    }
    return (true);
}
/// <summary>
/// Deserializes a DataRelationCollection from the queue (count, then for each
/// relation: child columns, nested flag, name, parent columns) and rebuilds drc.
/// </summary>
/// <returns>The number of bytes consumed from the queue.</returns>
private uint Pop(CUQueue UQueue, ref DataRelationCollection drc)
{
    uint bytesBefore = UQueue.GetSize();
    int relationCount;
    UQueue.Load(out relationCount);
    drc.Clear();
    for (int i = 0; i < relationCount; i++)
    {
        // wire order: child columns, nested flag, relation name, parent columns
        DataColumn[] childCols = null;
        PopTableColNamesOnly(UQueue, ref childCols);
        bool nested;
        UQueue.Load(out nested);
        string name;
        UQueue.Load(out name);
        DataColumn[] parentCols = null;
        PopTableColNamesOnly(UQueue, ref parentCols);
        DataRelation relation = new DataRelation(name, parentCols, childCols);
        relation.Nested = nested;
        drc.Add(relation);
    }
    return bytesBefore - UQueue.GetSize();
}
// Dispatches file-streaming responses: download open/data/completion and
// upload progress/completion. The front element of m_vContext is the
// in-flight transfer; Transfer() is called at the end to start the next one.
protected override void OnResultReturned(ushort reqId, CUQueue mc)
{
    switch (reqId)
    {
        case idDownload:
            {
                // final download status from the server
                int res;
                string errMsg;
                mc.Load(out res).Load(out errMsg);
                DDownload dl;
                lock (m_csFile)
                {
                    CContext context = m_vContext[0];
                    if (context.File != null)
                    {
                        context.File.Close();
                        context.File = null;
                    }
                    else if (res == 0)
                    {
                        // server reported success but we never opened the local
                        // file: surface the local open failure instead
                        res = CANNOT_OPEN_LOCAL_FILE_FOR_WRITING;
                        errMsg = context.ErrMsg;
                    }
                    dl = context.Download;
                }
                // callback outside the lock to avoid re-entrancy deadlocks
                if (dl != null)
                {
                    dl(this, res, errMsg);
                }
                lock (m_csFile)
                {
                    m_vContext.RemoveFromFront();
                }
            }
            break;
        case idStartDownloading:
            lock (m_csFile)
            {
                CContext context = m_vContext[0];
                mc.Load(out context.FileSize);
                try
                {
                    // map transfer flags to the local FileMode/FileShare
                    FileMode fm;
                    if ((context.Flags & FILE_OPEN_TRUNCACTED) == FILE_OPEN_TRUNCACTED)
                    {
                        fm = FileMode.Create;
                    }
                    else if ((context.Flags & FILE_OPEN_APPENDED) == FILE_OPEN_APPENDED)
                    {
                        fm = FileMode.Append;
                    }
                    else
                    {
                        fm = FileMode.OpenOrCreate;
                    }
                    FileShare fs = FileShare.None;
                    if ((context.Flags & FILE_OPEN_SHARE_WRITE) == FILE_OPEN_SHARE_WRITE)
                    {
                        fs = FileShare.Write;
                    }
                    context.File = new FileStream(context.LocalFile, fm, FileAccess.Write, fs);
                }
                catch (Exception err)
                {
                    // remember the open failure; reported later at idDownload
                    context.ErrMsg = err.Message;
                }
                finally
                {
                }
            }
            break;
        case idDownloading:
            {
                long downloaded = -1;
                DTransferring trans = null;
                // NOTE(review): this case guards m_vContext with m_cs while every
                // other case uses m_csFile — looks like a wrong lock object
                // (potential race on m_vContext); confirm against the class fields
                lock (m_cs)
                {
                    CContext context = m_vContext[0];
                    trans = context.Transferring;
                    if (context.File != null)
                    {
                        byte[] buffer = mc.IntenalBuffer;
                        context.File.Write(buffer, 0, (int)mc.GetSize());
                        downloaded = context.File.Position;
                    }
                }
                mc.SetSize(0); // chunk consumed
                if (trans != null)
                {
                    trans(this, downloaded);
                }
            }
            break;
        case idUpload:
            {
                // server's response to the initial upload request; res != 0 aborts
                bool removed = false;
                DUpload upl = null;
                int res;
                string errMsg;
                mc.Load(out res).Load(out errMsg);
                if (res != 0)
                {
                    lock (m_csFile)
                    {
                        CContext context = m_vContext[0];
                        removed = true;
                        upl = context.Upload;
                        if (context.File != null)
                        {
                            context.File.Close();
                        }
                    }
                }
                if (upl != null)
                {
                    upl(this, res, errMsg);
                }
                if (removed)
                {
                    lock (m_csFile)
                    {
                        m_vContext.RemoveFromFront();
                    }
                }
            }
            break;
        case idUploading:
            {
                // periodic progress report while uploading
                DTransferring trans = null;
                long uploaded;
                mc.Load(out uploaded);
                if (uploaded > 0)
                {
                    lock (m_csFile)
                    {
                        CContext context = m_vContext[0];
                        trans = context.Transferring;
                    }
                }
                if (trans != null)
                {
                    trans(this, uploaded);
                }
            }
            break;
        case idUploadCompleted:
            {
                DUpload upl = null;
                lock (m_csFile)
                {
                    CContext context = m_vContext[0];
                    upl = context.Upload;
                    if (context.File != null)
                    {
                        context.File.Close();
                        context.File = null;
                    }
                }
                if (upl != null)
                {
                    upl(this, 0, "");
                }
                lock (m_csFile)
                {
                    m_vContext.RemoveFromFront();
                }
            }
            break;
        default:
            base.OnResultReturned(reqId, mc);
            break;
    }
    // kick off the next queued transfer, if any
    lock (m_csFile)
    {
        Transfer();
    }
}
// Streams an entire IDataReader (header, then records in batches, then an
// end marker) to the peer, either as a routee result or a plain request.
// Batching is managed so that neither the batch queue nor the socket send
// buffer grows without bound. Returns false when any send fails.
// NOTE(review): both throw sites pass a human-readable message into the
// single-string ArgumentNullException/InvalidOperationException constructor;
// for ArgumentNullException that string is the paramName, not the message —
// consider nameof(dr) plus a message overload.
public virtual bool Send(IDataReader dr, uint batchSize)
{
    bool bSuc = false;
    if (dr == null)
    {
        throw new ArgumentNullException("Must pass in a valid data reader interface!");
    }
    if (AttachedClientSocket == null)
    {
        throw new InvalidOperationException("The asynchronous handler must be attached to an instance of CClientSocket first!");
    }
    bool rr = RouteeRequest;
    bool bBatching = Batching;
    // start our own batch window only if the caller didn't already
    if (!bBatching)
    {
        StartBatching();
    }
    using (CScopeUQueue UQueue = new CScopeUQueue())
    {
        CUQueue AdoUQueue = UQueue.UQueue;
        do
        {
            // header first: schema plus the (clamped) batch size
            m_AdoSerializer.PushHeader(AdoUQueue, dr);
            if (batchSize < 2048)
            {
                batchSize = 2048; // enforce a minimum batch size
            }
            AdoUQueue.Save(batchSize);
            if (rr)
            {
                bSuc = SendRouteeResult(AdoUQueue, CAdoSerializationHelper.idDataReaderHeaderArrive);
            }
            else
            {
                bSuc = SendRequest(CAdoSerializationHelper.idDataReaderHeaderArrive, AdoUQueue, m_arh);
            }
            AdoUQueue.SetSize(0);
            // monitor socket close event
            if (!bSuc)
            {
                break;
            }
            while (dr.Read())
            {
                m_AdoSerializer.Push(AdoUQueue, dr);
                if (AdoUQueue.GetSize() > batchSize)
                {
                    if (rr)
                    {
                        bSuc = SendRouteeResult(AdoUQueue, CAdoSerializationHelper.idDataReaderRecordsArrive);
                    }
                    else
                    {
                        bSuc = SendRequest(CAdoSerializationHelper.idDataReaderRecordsArrive, AdoUQueue, m_arh);
                    }
                    AdoUQueue.SetSize(0);
                    if (!bSuc)
                    {
                        break;
                    }
                    if (AttachedClientSocket.BytesBatched > 2 * batchSize)
                    {
                        // if we find too much are stored in batch queue, we send them and start a new batching
                        CommitBatching(true);
                        StartBatching();
                    }
                    if (AttachedClientSocket.BytesInSendingBuffer > 60 * 1024)
                    {
                        // if we find there are too much data in sending buffer,
                        // we wait until all of data are sent and processed
                        CommitBatching(true);
                        WaitAll();
                        StartBatching();
                    }
                }
            }
            if (!bSuc)
            {
                break;
            }
            if (AdoUQueue.GetSize() > 0) // remaining partial batch
            {
                if (rr)
                {
                    bSuc = SendRouteeResult(AdoUQueue, CAdoSerializationHelper.idDataReaderRecordsArrive);
                }
                else
                {
                    bSuc = SendRequest(CAdoSerializationHelper.idDataReaderRecordsArrive, AdoUQueue, m_arh);
                }
                AdoUQueue.SetSize(0);
            }
            if (!bSuc)
            {
                break;
            }
        } while (false);
        if (bSuc)
        {
            // end-of-reader marker
            if (rr)
            {
                bSuc = SendRouteeResult(CAdoSerializationHelper.idEndDataReader);
            }
            else
            {
                bSuc = SendRequest(CAdoSerializationHelper.idEndDataReader, m_arh);
            }
        }
        // close our batch window only if we opened it
        if (!bBatching)
        {
            CommitBatching(true);
        }
    }
    return (bSuc);
}
// File-streaming response dispatcher (newer variant with resume support and
// client message-queue backup). The front element of m_vContext is the
// in-flight transfer. Callbacks are generally invoked outside m_csFile to
// avoid re-entrancy deadlocks; OnPostProcessing(0, 0) schedules the next
// queued transfer after each completed/failed one.
protected override void OnResultReturned(ushort reqId, CUQueue mc)
{
    switch (reqId)
    {
        case idDownload:
            {
                // final download status from the server
                int res;
                string errMsg;
                mc.Load(out res).Load(out errMsg);
                DDownload dl = null;
                lock (m_csFile)
                {
                    if (m_vContext.Count > 0)
                    {
                        CContext ctx = m_vContext[0];
                        ctx.ErrCode = res;
                        ctx.ErrMsg = errMsg;
                        dl = ctx.Download;
                    }
                }
                if (dl != null)
                {
                    dl.Invoke(this, res, errMsg);
                }
                lock (m_csFile)
                {
                    if (m_vContext.Count > 0)
                    {
                        CloseFile(m_vContext.RemoveFromFront());
                    }
                }
                OnPostProcessing(0, 0);
            }
            break;
        case idStartDownloading:
            lock (m_csFile)
            {
                long fileSize;
                string localFile, remoteFile;
                uint flags;
                long initSize;
                mc.Load(out fileSize).Load(out localFile).Load(out remoteFile).Load(out flags).Load(out initSize);
                // inner lock on the same object is re-entrant and harmless
                lock (m_csFile)
                {
                    if (m_vContext.Count == 0)
                    {
                        // download replayed from the client message queue:
                        // rebuild the context from the wire data
                        CContext ctx = new CContext(false, flags);
                        ctx.LocalFile = localFile;
                        ctx.FilePath = remoteFile;
                        OpenLocalWrite(ctx);
                        ctx.InitSize = initSize;
                        m_vContext.AddToBack(ctx);
                    }
                    CContext context = m_vContext[0];
                    context.FileSize = fileSize;
                    // truncate any partial data written past the resume point
                    initSize = (context.InitSize > 0) ? context.InitSize : 0;
                    if (context.File.Position > initSize)
                    {
                        context.File.SetLength(initSize);
                    }
                }
            }
            break;
        case idDownloading:
            {
                long downloaded = 0;
                DTransferring trans = null;
                CContext context = null;
                lock (m_csFile)
                {
                    if (m_vContext.Count > 0)
                    {
                        context = m_vContext[0];
                        trans = context.Transferring;
                        byte[] buffer = mc.IntenalBuffer;
                        try
                        {
                            context.File.Write(buffer, 0, (int)mc.GetSize());
                            // report progress relative to the resume offset
                            long initSize = (context.InitSize > 0) ? context.InitSize : 0;
                            downloaded = context.File.Position - initSize;
                        }
                        catch (System.IO.IOException err)
                        {
                            context.ErrMsg = err.Message;
#if NO_HRESULT
                            context.ErrCode = CANNOT_OPEN_LOCAL_FILE_FOR_WRITING;
#else
                            context.ErrCode = err.HResult;
#endif
                        }
                    }
                }
                mc.SetSize(0); // chunk consumed
                if (context != null && context.HasError)
                {
                    if (context.Download != null)
                    {
                        context.Download.Invoke(this, context.ErrCode, context.ErrMsg);
                    }
                    // NOTE(review): this RemoveFromFront is outside m_csFile,
                    // unlike the other removal sites — confirm intentional
                    CloseFile(m_vContext.RemoveFromFront());
                    OnPostProcessing(0, 0);
                }
                else if (trans != null)
                {
                    trans.Invoke(this, downloaded);
                }
            }
            break;
        case idUploadBackup:
            // echo of the queue-replay backup request; nothing to do
            break;
        case idUpload:
            {
                // server accepted/rejected the upload; on acceptance, pump the
                // local file to the server in STREAM_CHUNK_SIZE chunks
                CContext context = null;
                int res;
                string errMsg;
                mc.Load(out res).Load(out errMsg);
                if (res != 0 || (errMsg != null && errMsg.Length > 0))
                {
                    lock (m_csFile)
                    {
                        if (m_vContext.Count > 0)
                        {
                            context = m_vContext[0];
                            mc.Load(out context.InitSize);
                            context.ErrCode = res;
                            context.ErrMsg = errMsg;
                        }
                    }
                }
                else
                {
                    CClientSocket cs = Socket;
                    lock (m_csFile)
                    {
                        if (m_vContext.Count > 0)
                        {
                            context = m_vContext[0];
                            mc.Load(out context.InitSize);
                            using (CScopeUQueue sb = new CScopeUQueue())
                            {
                                DAsyncResultHandler rh = null;
                                DOnExceptionFromServer se = null;
                                // ensure the scratch buffer can hold a full chunk
                                if (sb.UQueue.MaxBufferSize < STREAM_CHUNK_SIZE)
                                {
                                    sb.UQueue.Realloc(STREAM_CHUNK_SIZE);
                                }
                                byte[] buffer = sb.UQueue.IntenalBuffer;
                                try
                                {
                                    context.QueueOk = cs.ClientQueue.StartJob();
                                    bool queue_enabled = cs.ClientQueue.Available;
                                    if (queue_enabled)
                                    {
                                        // record a replayable backup request in the
                                        // client message queue before the data
                                        SendRequest(idUploadBackup, context.FilePath, context.Flags, context.FileSize, context.InitSize, rh, context.Discarded, se);
                                    }
                                    int ret = context.File.Read(buffer, 0, (int)STREAM_CHUNK_SIZE);
                                    while (ret == STREAM_CHUNK_SIZE)
                                    {
                                        if (!SendRequest(idUploading, buffer, (uint)ret, rh, context.Discarded, se))
                                        {
                                            context.ErrCode = cs.ErrorCode;
                                            context.ErrMsg = cs.ErrorMsg;
                                            break;
                                        }
                                        ret = context.File.Read(buffer, 0, (int)STREAM_CHUNK_SIZE);
                                        if (queue_enabled)
                                        {
                                            // save file into client message queue
                                        }
                                        else if (cs.BytesInSendingBuffer > 40 * STREAM_CHUNK_SIZE)
                                        {
                                            // backpressure: stop pumping and let the
                                            // idUploading acks drive the remainder
                                            break;
                                        }
                                    }
                                    if (ret > 0 && !context.HasError)
                                    {
                                        // final partial chunk
                                        if (!SendRequest(idUploading, buffer, (uint)ret, rh, context.Discarded, se))
                                        {
                                            context.ErrCode = cs.ErrorCode;
                                            context.ErrMsg = cs.ErrorMsg;
                                        }
                                    }
                                    if (ret < STREAM_CHUNK_SIZE && !context.HasError)
                                    {
                                        // EOF reached: finish the upload
                                        context.Sent = true;
                                        SendRequest(idUploadCompleted, rh, context.Discarded, se);
                                        if (context.QueueOk)
                                        {
                                            Socket.ClientQueue.EndJob();
                                        }
                                    }
                                }
                                catch (System.IO.IOException err)
                                {
                                    errMsg = err.Message;
#if NO_HRESULT
                                    res = CANNOT_OPEN_LOCAL_FILE_FOR_READING;
#else
                                    res = err.HResult;
#endif
                                    context.ErrCode = res;
                                    context.ErrMsg = errMsg;
                                }
                            }
                        }
                    }
                }
                if (context != null && context.HasError)
                {
                    if (context.Upload != null)
                    {
                        context.Upload.Invoke(this, context.ErrCode, context.ErrMsg);
                    }
                    lock (m_csFile)
                    {
                        CloseFile(m_vContext.RemoveFromFront());
                    }
                    if (context.QueueOk)
                    {
                        // roll back the partially recorded queue job
                        Socket.ClientQueue.AbortJob();
                    }
                    OnPostProcessing(0, 0);
                }
            }
            break;
        case idUploading:
            {
                // progress ack; may also carry an error pair, and drives the
                // next chunk when backpressure paused the idUpload pump
                int errCode = 0;
                string errMsg = "";
                CContext context = null;
                DTransferring trans = null;
                long uploaded;
                mc.Load(out uploaded);
                if (mc.GetSize() >= 8)
                {
                    mc.Load(out errCode).Load(out errMsg);
                }
                lock (m_csFile)
                {
                    if (m_vContext.Count > 0)
                    {
                        context = m_vContext[0];
                        trans = context.Transferring;
                        if (uploaded < 0 || errCode != 0 || errMsg.Length != 0)
                        {
                            context.ErrCode = errCode;
                            context.ErrMsg = errMsg;
                            CloseFile(context);
                        }
                        else if (!context.Sent)
                        {
                            using (CScopeUQueue sb = new CScopeUQueue())
                            {
                                DAsyncResultHandler rh = null;
                                DOnExceptionFromServer se = null;
                                if (sb.UQueue.MaxBufferSize < STREAM_CHUNK_SIZE)
                                {
                                    sb.UQueue.Realloc(STREAM_CHUNK_SIZE);
                                }
                                byte[] buffer = sb.UQueue.IntenalBuffer;
                                try
                                {
                                    int ret = context.File.Read(buffer, 0, (int)STREAM_CHUNK_SIZE);
                                    if (ret > 0)
                                    {
                                        SendRequest(idUploading, buffer, (uint)ret, rh, context.Discarded, se);
                                    }
                                    if (ret < STREAM_CHUNK_SIZE)
                                    {
                                        context.Sent = true;
                                        SendRequest(idUploadCompleted, rh, context.Discarded, se);
                                    }
                                }
                                catch (System.IO.IOException err)
                                {
                                    context.ErrMsg = err.Message;
#if NO_HRESULT
                                    context.ErrCode = CANNOT_OPEN_LOCAL_FILE_FOR_READING;
#else
                                    context.ErrCode = err.HResult;
#endif
                                }
                            }
                        }
                    }
                }
                if (context != null && context.HasError)
                {
                    if (context.Upload != null)
                    {
                        context.Upload.Invoke(this, context.ErrCode, context.ErrMsg);
                    }
                    lock (m_csFile)
                    {
                        CloseFile(m_vContext.RemoveFromFront());
                    }
                    OnPostProcessing(0, 0);
                }
                else if (trans != null)
                {
                    trans.Invoke(this, uploaded);
                }
            }
            break;
        case idUploadCompleted:
            {
                DUpload upl = null;
                lock (m_csFile)
                {
                    if (m_vContext.Count > 0)
                    {
                        if (m_vContext[0].File != null)
                        {
                            upl = m_vContext[0].Upload;
                        }
                        else
                        {
                            // completion replayed from the queue without an open
                            // file: reset the context state instead
                            m_vContext[0].QueueOk = false;
                            m_vContext[0].Sent = false;
                            CloseFile(m_vContext[0]);
                        }
                    }
                }
                if (upl != null)
                {
                    upl.Invoke(this, 0, "");
                }
                lock (m_csFile)
                {
                    if (m_vContext.Count > 0)
                    {
                        if (m_vContext[0].File != null)
                        {
                            CloseFile(m_vContext.RemoveFromFront());
                        }
                    }
                }
                OnPostProcessing(0, 0);
            }
            break;
        default:
            base.OnResultReturned(reqId, mc);
            break;
    }
}
// Streams a DataTable (header, rows in batches, end marker) to the client and
// returns the total number of bytes sent. On cancel/close, nSize is set to
// the error constant (REQUEST_CANCELED or SOCKET_NOT_FOUND) instead of a byte
// count — callers are expected to compare against those constants.
public virtual ulong Send(DataTable dt, uint batchSize)
{
    uint res;
    ulong nSize;
    bool bSuc;
    if (dt == null)
    {
        throw new ArgumentException("Must pass in an valid DataTable object!");
    }
    using (CScopeUQueue su = new CScopeUQueue())
    {
        CUQueue UQueue = su.UQueue;
        bool bBatching = Batching;
        // open our own batch window only if the caller didn't already
        if (!bBatching)
        {
            bSuc = StartBatching();
        }
        do
        {
            //m_AdoSerializer->PushHeader(UQueue, dt, bNeedParentRelations, bNeedChildRelations);
            m_AdoSerializer.PushHeader(UQueue, dt, false, false);
            if (batchSize < 2048)
            {
                batchSize = 2048; // enforce a minimum batch size
            }
            UQueue.Save(batchSize);
            nSize = res = SendResult(CAdoSerializationHelper.idDataTableHeaderArrive, UQueue);
            UQueue.SetSize(0);
            // monitor socket close event and cancel request
            if (res == CClientPeer.REQUEST_CANCELED || res == CClientPeer.SOCKET_NOT_FOUND)
            {
                break;
            }
            foreach (DataRow dr in dt.Rows)
            {
                m_AdoSerializer.Push(UQueue, dr);
                if (UQueue.GetSize() > batchSize)
                {
                    res = SendResult(CAdoSerializationHelper.idDataTableRowsArrive, UQueue);
                    UQueue.SetSize(0);
                    // monitor socket close event and cancel request
                    if (res == CClientPeer.REQUEST_CANCELED || res == CClientPeer.SOCKET_NOT_FOUND)
                    {
                        nSize = res;
                        break;
                    }
                    else
                    {
                        if (BytesBatched > 2 * batchSize)
                        {
                            // flush and restart batching when the batch queue grows
                            bSuc = CommitBatching();
                            bSuc = StartBatching();
                        }
                        nSize += res;
                    }
                }
            }
            if (res == CClientPeer.REQUEST_CANCELED || res == CClientPeer.SOCKET_NOT_FOUND)
            {
                break;
            }
            if (UQueue.GetSize() > 0) // remaining partial batch
            {
                res = SendResult(CAdoSerializationHelper.idDataTableRowsArrive, UQueue);
                UQueue.SetSize(0);
                // monitor socket close event and cancel request
                if (res == CClientPeer.REQUEST_CANCELED || res == CClientPeer.SOCKET_NOT_FOUND)
                {
                    nSize = res;
                    break;
                }
                else
                {
                    nSize += res;
                }
            }
        } while (false);
        UQueue.SetSize(0);
        // end-of-table marker is sent even after a cancel/close above
        res = SendResult(CAdoSerializationHelper.idEndDataTable);
        if (res == CClientPeer.REQUEST_CANCELED || res == CClientPeer.SOCKET_NOT_FOUND)
        {
            nSize = res;
        }
        else
        {
            nSize += res;
        }
        // close our batch window only if we opened it
        if (!bBatching)
        {
            bSuc = CommitBatching();
        }
    }
    return (nSize);
}
/// <summary>
/// Copies the queue's unread bytes into the stream; a null or empty queue is a no-op.
/// </summary>
internal static void Write(Stream s, CUQueue q)
{
    if (q == null)
        return;
    uint size = q.GetSize();
    if (size == 0)
        return;
    s.Write(q.m_bytes, (int)q.HeadPosition, (int)size);
}
// Handles rowset/BLOB streaming from the server: accumulates row values in
// m_vData, reassembles chunked BLOBs in m_Blob, and fires the registered
// header/row callbacks keyed by m_indexRowset.
protected override void OnResultReturned(ushort reqId, CUQueue mc)
{
    switch (reqId)
    {
        case DB_CONSTS.idRowsetHeader:
            {
                m_Blob.SetSize(0);
                // shrink an over-grown blob buffer back to the 1MB cap
                if (m_Blob.MaxBufferSize > ONE_MEGA_BYTES)
                {
                    m_Blob.Realloc(ONE_MEGA_BYTES);
                }
                CDBColumnInfoArray vColInfo;
                mc.Load(out vColInfo).Load(out m_indexRowset);
                KeyValuePair <DRowsetHeader, DRows> p = new KeyValuePair <DRowsetHeader, DRows>();
                lock (m_csCache)
                {
                    m_vData.Clear();
                    if (m_mapRowset.ContainsKey(m_indexRowset))
                    {
                        p = m_mapRowset[m_indexRowset];
                    }
                }
                // header callback outside the lock
                if (p.Key != null)
                {
                    p.Key.Invoke(vColInfo);
                }
            }
            break;
        case DB_CONSTS.idBeginRows:
            m_Blob.SetSize(0);
            m_vData.Clear();
            break;
        case DB_CONSTS.idTransferring:
            // intermediate chunk of row values
            while (mc.GetSize() > 0)
            {
                object vt;
                mc.Load(out vt);
                m_vData.Add(vt);
            }
            break;
        case DB_CONSTS.idEndRows:
            if (mc.GetSize() > 0 || m_vData.Count > 0)
            {
                object vt;
                while (mc.GetSize() > 0)
                {
                    mc.Load(out vt);
                    m_vData.Add(vt);
                }
                DRows row = null;
                lock (m_csCache)
                {
                    if (m_mapRowset.ContainsKey(m_indexRowset))
                    {
                        row = m_mapRowset[m_indexRowset].Value;
                    }
                }
                // row callback outside the lock
                if (row != null)
                {
                    row.Invoke(m_vData);
                }
            }
            m_vData.Clear();
            break;
        case DB_CONSTS.idStartBLOB:
            {
                m_Blob.SetSize(0);
                uint len;
                mc.Load(out len);
                // pre-grow the blob buffer when the total length is known
                if (len != uint.MaxValue && len > m_Blob.MaxBufferSize)
                {
                    m_Blob.Realloc(len);
                }
                m_Blob.Push(mc.IntenalBuffer, mc.HeadPosition, mc.GetSize());
                mc.SetSize(0);
            }
            break;
        case DB_CONSTS.idChunk:
            // middle chunk: append as-is
            m_Blob.Push(mc.IntenalBuffer, mc.GetSize());
            mc.SetSize(0);
            break;
        case DB_CONSTS.idEndBLOB:
            if (mc.GetSize() > 0 || m_Blob.GetSize() > 0)
            {
                m_Blob.Push(mc.IntenalBuffer, mc.GetSize());
                mc.SetSize(0);
                unsafe
                {
                    fixed(byte *p = m_Blob.IntenalBuffer)
                    {
                        // the serialized value starts with a ushort type tag
                        // followed by a uint length; patch the length in place
                        uint *len = (uint *)(p + m_Blob.HeadPosition + sizeof(ushort));
                        if (*len >= BLOB_LENGTH_NOT_AVAILABLE)
                        {
                            // length should be reset if BLOB length not available
                            // from server side at beginning
                            *len = (m_Blob.GetSize() - sizeof(ushort) - sizeof(uint));
                        }
                    }
                }
                object vt;
                m_Blob.Load(out vt);
                m_vData.Add(vt);
            }
            break;
        default:
            base.OnResultReturned(reqId, mc);
            break;
    }
}
/// <summary>
/// Pops serialized records from the queue and appends them as rows to the
/// current DataTable.
/// </summary>
/// <param name="UQueue">Source queue; null or empty yields 0 rows.</param>
/// <returns>The number of rows added.</returns>
private int LoadDataReaderRecords(CUQueue UQueue)
{
    // Fix: the original only checked UQueue for null inside the loop
    // condition, after UQueue.GetSize() had already been called — so a null
    // queue crashed with a NullReferenceException. Guard before dereferencing.
    if (UQueue == null || UQueue.GetSize() == 0)
        return 0;
    DataTable dt = m_dt;
    int nSize = 0;
    object[] aData = null; // reused by PopDataRecord across rows
    while (UQueue.GetSize() > 0)
    {
        PopDataRecord(UQueue, ref aData);
        dt.Rows.Add(aData);
        ++nSize;
    }
    return nSize;
}
// Deserializes a data-reader header into a fresh DataTable: field count,
// affected-row count, per-column type tags, per-column attribute flags,
// column sizes and names, primary-key ordinals, and an optional trailing
// batch size. Returns null on an empty queue.
private DataTable LoadDataReaderHeader(CUQueue UQueue)
{
    int n;
    int nData;
    short sData;
    string str;
    int nFieldCount;
    if (UQueue.GetSize() == 0)
        return null;
    m_dt = new DataTable();
    DataTable dt = m_dt;
    m_bLoadingDataTable = false;
    UQueue.Load(out nFieldCount);
    UQueue.Load(out m_nAffected);
    // first pass: one type tag per column
    m_dts = new tagDataTypeSupported[nFieldCount];
    for (n = 0; n < nFieldCount; n++)
    {
        UQueue.Load(out sData);
        m_dts[n] = (tagDataTypeSupported)sData;
    }
    // m_qTemp collects the ordinals of primary-key columns
    m_qTemp.SetSize(0);
    for (n = 0; n < nFieldCount; n++)
    {
        // attribute bit flags for this column
        UQueue.Load(out nData);
        DataColumn dc = new DataColumn();
        dc.DataType = GetType(m_dts[n]);
        dc.AllowDBNull = ((nData & (int)tagColumnBit.cbAllowDBNull) == (int)tagColumnBit.cbAllowDBNull);
        dc.AutoIncrement = ((nData & (int)tagColumnBit.cbIsAutoIncrement) == (int)tagColumnBit.cbIsAutoIncrement);
        dc.ReadOnly = ((nData & (int)tagColumnBit.cbIsReadOnly) == (int)tagColumnBit.cbIsReadOnly);
        dc.Unique = ((nData & (int)tagColumnBit.cbIsUnique) == (int)tagColumnBit.cbIsUnique);
        bool cbIsLong = ((nData & (int)tagColumnBit.cbIsLong) == (int)tagColumnBit.cbIsLong);
        if ((nData & (int)tagColumnBit.cbIsKey) == (int)tagColumnBit.cbIsKey)
        {
            m_qTemp.Save(n); // remember PK ordinal for the second pass below
        }
        // column size; only meaningful for bounded string/char columns
        UQueue.Load(out nData);
        if (nData > 0 && !cbIsLong && (m_dts[n] == tagDataTypeSupported.dtString || m_dts[n] == tagDataTypeSupported.dtChars))
        {
            dc.MaxLength = nData; //ColumnSize
        }
        UQueue.Load(out str);
        dc.ColumnName = str;
        dt.Columns.Add(dc);
    }
    if (m_qTemp.GetSize() > 0)
    {
        // rebuild the primary key from the collected ordinals (4 bytes each)
        int nIndex = 0;
        DataColumn[] dcs = new DataColumn[m_qTemp.GetSize() / 4];
        while (m_qTemp.GetSize() > 0)
        {
            m_qTemp.Load(out nData);
            DataColumn dc = dt.Columns[nData];
            dcs[nIndex] = dc;
            ++nIndex;
        }
        dt.PrimaryKey = dcs;
    }
    // optional trailing batch size (older peers may not send it)
    if (UQueue.GetSize() >= 4)
    {
        UQueue.Load(out m_nBatchSize);
    }
    else
    {
        m_nBatchSize = 0;
    }
    return dt;
}
// Dispatches one incoming ADO serialization message to the matching loader.
// Tracks three exclusive modes via flags: m_bDataSet (whole DataSet),
// m_bDataReader (data-reader stream), m_bLoadingDataTable (single table).
internal void Load(ushort sRequestID, CUQueue UQueue)
{
    if (UQueue == null)
        throw new ArgumentException("Invalid input parameter UQueue");
    switch (sRequestID)
    {
        case idDataSetHeaderArrive:
            if (UQueue.GetSize() > 0)
            {
                m_bDataSet = true;
                // drop a stale table from a previous transfer
                if (m_dt != null && m_dt.Columns.Count > 0)
                    m_dt = new DataTable();
                LoadDataSetHeader(UQueue);
            }
            break;
        case idDataReaderHeaderArrive:
            if (UQueue.GetSize() > 0)
            {
                // drop a stale data set from a previous transfer
                if (m_ds != null && m_ds.Tables.Count > 0)
                    m_ds = new DataSet();
                m_bDataReader = true;
                LoadDataReaderHeader(UQueue);
                m_dtBackup = m_dt.Clone(); //for better performance
                RemoveAAU();
            }
            break;
        case idDataTableHeaderArrive:
            if (UQueue.GetSize() > 0)
            {
                // only reset the data set when not loading a table INTO a data set
                if (!m_bDataSet && m_ds != null && m_ds.Tables.Count > 0)
                    m_ds = new DataSet();
                m_bLoadingDataTable = true;
                LoadDataTableHeader(UQueue);
                m_dtBackup = m_dt.Clone(); //for better performance
                RemoveAAU();
                if (m_bDataSet)
                    m_ds.Tables.Add(m_dt);
            }
            break;
        case idDataTableRowsArrive:
        case idDataReaderRecordsArrive:
            if (UQueue.GetSize() > 0)
                LoadRows(UQueue);
            break;
        case idEndDataReader:
            if (m_bDataReader)
            {
                if (m_dt != null)
                    m_dt.AcceptChanges();
                AddAAU(); // restore auto-increment/unique settings stripped earlier
                //reset datatable
                m_bDataReader = false;
            }
            break;
        case idEndDataTable:
            if (m_bLoadingDataTable)
            {
                AddAAU(); // restore auto-increment/unique settings stripped earlier
                //reset datatable
                m_bLoadingDataTable = false;
            }
            break;
        case idEndDataSet:
            if (m_bDataSet)
            {
                // relations arrive last, once all tables are in place
                DataRelationCollection drc = LoadDataSetRelations(UQueue);
                if (drc != null)
                {
                    int n;
                    int nSize = drc.Count;
                    for (n = 0; n < nSize; n++)
                    {
                        DataRelation dr = drc[n];
                        m_ds.Relations.Add(dr);
                    }
                }
                m_bDataSet = false;
            }
            break;
        default:
            // unknown ids are ignored by design
            break;
    }
}
/// <summary>
/// Deserializes a list of column references — a count followed by
/// (table name, column ordinal) pairs — resolving each against CurrentDataSet.
/// </summary>
/// <returns>The number of bytes consumed from the queue.</returns>
private uint PopTableColNamesOnly(CUQueue UQueue, ref DataColumn[] dcs)
{
    uint bytesBefore = UQueue.GetSize();
    int count;
    UQueue.Load(out count);
    dcs = new DataColumn[count];
    for (int i = 0; i < count; i++)
    {
        string tableName;
        UQueue.Load(out tableName);
        int ordinal;
        UQueue.Load(out ordinal);
        dcs[i] = CurrentDataSet.Tables[tableName].Columns[ordinal];
    }
    return bytesBefore - UQueue.GetSize();
}
// Deserializes one record into aData using the column type tags captured by
// the header (m_dts). The record starts with a null bitmap (one bit per
// column, LSB-first within each byte); a set bit means DBNull and no payload
// for that column. Returns the number of bytes consumed from the queue.
private uint PopDataRecord(CUQueue UQueue, ref object[] aData)
{
    int n;
    byte bData = 0;
    byte bOne = 1;
    if (m_dts == null)
        throw new InvalidOperationException("DataTable header is not de-serialized yet");
    uint nSize = UQueue.GetSize();
    int nLen = m_dts.Length;
    // reuse the caller's array when the shape matches
    if (aData == null || aData.Length != nLen)
        aData = new object[nLen];
    // one bit per column, rounded up to whole bytes
    uint nBits = (uint)(m_dts.Length / 8 + (((m_dts.Length % 8) != 0) ? 1 : 0));
    byte[] aBit = new byte[nBits];
    UQueue.Pop(out aBit, nBits);
    for (n = 0; n < nLen; n++)
    {
        if ((n % 8) == 0)
            bData = aBit[n / 8];
        if ((bData & (bOne << (byte)(n % 8))) != 0)
        {
            // bit set: column is null, no payload follows
            aData[n] = DBNull.Value;
        }
        else
        {
            // payload present; decode per the header's type tag
            switch (m_dts[n])
            {
                case tagDataTypeSupported.dtBoolean: { bool myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtByte: { byte myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtChar: { char myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtDateTime: { DateTime myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtDecimal: { decimal myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtDouble: { double myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtFloat: { float myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtGuid: { Guid myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtInt16: { short myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtInt32: { int myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtInt64: { long myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtUDT:
                case tagDataTypeSupported.dtString:
                    // UDTs travel as strings
                    { string myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtBytes:
                    {
                        // length-prefixed raw byte array
                        byte[] buffer;
                        uint nBytes;
                        UQueue.Load(out nBytes);
                        UQueue.Pop(out buffer, nBytes);
                        aData[n] = buffer;
                    }
                    break;
                case tagDataTypeSupported.dtUInt64:
                case tagDataTypeSupported.dtUInt32:
                case tagDataTypeSupported.dtUInt16:
                case tagDataTypeSupported.dtValue:
                case tagDataTypeSupported.dtValues:
                case tagDataTypeSupported.dtTimeSpan:
                    // these travel as self-describing variants
                    UQueue.Load(out aData[n]);
                    break;
                default:
                    throw new InvalidOperationException("Unsupported data type for serialization");
            }
        }
    }
    return (nSize - UQueue.GetSize());
}
// Executes the prepared SQL statement once per parameter row in m_vParam
// (flattened row-major: m_vParam.Count must be a multiple of the parameter
// count). Streams result sets via PushToClient and output parameters via
// idOutputParameter. Returns fails in the high 32 bits and successes in the
// low 32 bits of the result.
// NOTE(review): the SqlDataReader is not wrapped in using/finally — if
// PushToClient or NextResult throws, the reader leaks until GC; also
// err.ErrorCode on SqlException is the HRESULT — err.Number is usually the
// SQL Server error number; confirm which one the client protocol expects.
private ulong ExecuteParameters(bool rowset, bool meta, bool lastInsertId, ulong index, out long affected, out int res, out string errMsg, out object vtId)
{
    ulong fail_ok = 0;
    m_indexCall = index;
    affected = 0;
    res = 0;
    errMsg = "";
    vtId = null;
    // snapshot counters so the deltas can be packed into the return value
    ulong fails = m_fails;
    ulong oks = m_oks;
    bool ok = true;
    bool HeaderSent = false;
    do
    {
        if (m_sqlPrepare == null || m_sqlPrepare.Parameters.Count == 0 || m_vParam.Count == 0)
        {
            res = -2;
            errMsg = "No parameter specified";
            ++m_fails;
            break;
        }
        int cols = m_sqlPrepare.Parameters.Count;
        if ((m_vParam.Count % cols) != 0)
        {
            res = -2;
            errMsg = "Bad parameter data array size";
            ++m_fails;
            break;
        }
        if (m_trans != null)
        {
            m_sqlPrepare.Transaction = m_trans;
        }
        int rows = m_vParam.Count / cols;
        for (int r = 0; r < rows; ++r)
        {
            try
            {
                // bind this row's values onto the prepared parameters
                int c = 0;
                foreach (SqlParameter p in m_sqlPrepare.Parameters)
                {
                    p.Value = m_vParam[r * cols + c];
                    ++c;
                }
                if (rowset)
                {
                    SqlDataReader reader = m_sqlPrepare.ExecuteReader(meta ? CommandBehavior.KeyInfo : CommandBehavior.Default);
                    // stream every result set of this execution
                    while (reader.FieldCount > 0)
                    {
                        ok = PushToClient(reader, meta);
                        HeaderSent = true;
                        if (reader.RecordsAffected > 0)
                        {
                            affected += reader.RecordsAffected;
                        }
                        if (!ok || !reader.NextResult())
                        {
                            break;
                        }
                    }
                    reader.Close();
                }
                else
                {
                    int ret = m_sqlPrepare.ExecuteNonQuery();
                    if (ret > 0)
                    {
                        affected += ret;
                    }
                }
                if (ok && m_outputs > 0)
                {
                    // an empty header announces the output-parameter payload
                    CDBColumnInfoArray v = new CDBColumnInfoArray();
                    uint ret = SendResult(DB_CONSTS.idRowsetHeader, v, index, (uint)m_outputs);
                    ok = (ret != SOCKET_NOT_FOUND && ret != REQUEST_CANCELED);
                    HeaderSent = true;
                    if (ok)
                    {
                        using (CScopeUQueue sb = new CScopeUQueue())
                        {
                            CUQueue q = sb.UQueue;
                            foreach (SqlParameter p in m_sqlPrepare.Parameters)
                            {
                                if (p.Direction != ParameterDirection.Input)
                                {
                                    q.Save(p.Value);
                                }
                            }
                            ok = (SendResult(DB_CONSTS.idOutputParameter, q.IntenalBuffer, q.GetSize()) == q.GetSize());
                        }
                    }
                }
                ++m_oks;
            }
            catch (SqlException err)
            {
                // only the first error is reported; later rows still count as fails
                if (res == 0)
                {
                    res = err.ErrorCode;
                    errMsg = err.Message;
                }
                ++m_fails;
            }
            catch (Exception err)
            {
                if (res == 0)
                {
                    res = -1;
                    errMsg = err.Message;
                }
                ++m_fails;
            }
            if (!ok)
            {
                break; // socket closed or request canceled: stop executing rows
            }
        }
    } while (false);
    if (!HeaderSent && ok)
    {
        // the client always expects at least one rowset header
        CDBColumnInfoArray v = new CDBColumnInfoArray();
        SendResult(DB_CONSTS.idRowsetHeader, v, index);
    }
    // pack (fails delta << 32) | oks delta
    fail_ok = ((m_fails - fails) << 32);
    fail_ok += (m_oks - oks);
    return (fail_ok);
}
// Deserializes a single DataColumn. Wire order is strict: null flag, attribute
// bit flags, auto-increment seed/step, caption, column mapping, name, type tag,
// default value, expression, max length, namespace, prefix. Returns the number
// of bytes consumed.
private uint Pop(CUQueue UQueue, ref DataColumn dc)
{
    bool bNull;
    uint nLen = UQueue.GetSize();
    UQueue.Load(out bNull);
    if (bNull)
        dc = null; // column was serialized as null
    else
    {
        int nData;
        object ob;
        short sData;
        string str;
        long lData;
        byte bData;
        // attribute bit flags
        UQueue.Load(out bData);
        if (dc == null)
            dc = new DataColumn();
        dc.AllowDBNull = ((bData & (int)tagColumnBit.cbAllowDBNull) == (int)tagColumnBit.cbAllowDBNull);
        dc.AutoIncrement = ((bData & (int)tagColumnBit.cbIsAutoIncrement) == (int)tagColumnBit.cbIsAutoIncrement);
        dc.ReadOnly = ((bData & (int)tagColumnBit.cbIsReadOnly) == (int)tagColumnBit.cbIsReadOnly);
        dc.Unique = ((bData & (int)tagColumnBit.cbIsUnique) == (int)tagColumnBit.cbIsUnique);
        UQueue.Load(out lData);
        dc.AutoIncrementSeed = lData;
        UQueue.Load(out lData);
        dc.AutoIncrementStep = lData;
        UQueue.Load(out str);
        dc.Caption = str;
        UQueue.Load(out bData);
        dc.ColumnMapping = (MappingType)bData;
        UQueue.Load(out str);
        dc.ColumnName = str;
        UQueue.Load(out sData);
        dc.DataType = GetType((tagDataTypeSupported)sData);
        /* UQueue.Pop(out bData); dc.DateTimeMode = (DataSetDateTime)bData;*/
        UQueue.Load(out ob);
        dc.DefaultValue = ob;
        UQueue.Load(out str);
        dc.Expression = str;
        UQueue.Load(out nData);
        dc.MaxLength = nData;
        UQueue.Load(out str);
        dc.Namespace = str;
        UQueue.Load(out str);
        dc.Prefix = str;
    }
    return (nLen - UQueue.GetSize());
}
/// <summary>
/// Deserializes a DataTable header (affected count, flags, name, columns,
/// primary key, optional relations and batch size) from the queue and caches
/// it in m_dt for subsequent row loading. Returns null on an empty queue.
/// </summary>
private DataTable LoadDataTableHeader(CUQueue UQueue)
{
    int n;
    bool bNeedChildRelations;
    bool bNeedbParentRelations;
    if (UQueue.GetSize() == 0)
    {
        return null;
    }
    int nData = 0;
    byte bData = 0;
    string str = null;
    DataTable dt = new DataTable();
    m_dt = dt;
    m_bLoadingDataTable = true;
    UQueue.Load(out m_nAffected);
    UQueue.Load(out bData);
    // bits 2/4 flag whether relation collections follow after the header
    bNeedChildRelations = ((bData & 2) == 2);
    bNeedbParentRelations = ((bData & 4) == 4);
    DataColumnCollection dcc = dt.Columns;
    UQueue.Load(out str);
    dt.TableName = str;
    Pop(UQueue, ref dcc);
    // cache supported-type codes so row deserialization can switch quickly
    m_dts = new tagDataTypeSupported[dcc.Count];
    for (n = 0; n < dcc.Count; n++)
    {
        m_dts[n] = GetDT(dcc[n].DataType.FullName);
    }
    UQueue.Load(out str);
    dt.DisplayExpression = str;
    UQueue.Load(out nData);
    dt.MinimumCapacity = nData;
    UQueue.Load(out str);
    dt.Namespace = str;
    UQueue.Load(out str);
    dt.Prefix = str;
    // FIX: the original reused nData as both the primary-key count and the
    // per-key column ordinal; the Load inside the loop clobbered the loop
    // bound (n < nData). Read the count and each ordinal into their own
    // variables.
    UQueue.Load(out nData);
    int pkCount = nData;
    DataColumn[] pk = new DataColumn[pkCount];
    for (n = 0; n < pkCount; n++)
    {
        int ordinal;
        UQueue.Load(out ordinal);
        pk[n] = dt.Columns[ordinal];
    }
    dt.PrimaryKey = pk;
    if (bNeedbParentRelations)
    {
        DataRelationCollection drc = dt.ParentRelations;
        Pop(UQueue, ref drc);
    }
    if (bNeedChildRelations)
    {
        DataRelationCollection drc = dt.ChildRelations;
        Pop(UQueue, ref drc);
    }
    // trailing 4 bytes, when present, carry the batch size hint
    if (UQueue.GetSize() >= 4)
    {
        UQueue.Load(out m_nBatchSize);
    }
    else
    {
        m_nBatchSize = 0;
    }
    return dt;
}
/// <summary>
/// Deserializes one DataRow's cell values and its original DataRowState from
/// the queue. A leading bitmask (one bit per column, packed into bytes) marks
/// DBNull cells; values then follow only for unset bits, typed according to
/// the cached m_dts type codes produced by LoadDataTableHeader.
/// </summary>
/// <param name="UQueue">Source queue positioned at a serialized row.</param>
/// <param name="dr">A freshly created row of the target table; its ItemArray is replaced.</param>
/// <param name="drs">Receives the row's serialized DataRowState.</param>
/// <returns>The number of bytes consumed from the queue.</returns>
/// <exception cref="ArgumentNullException">Header not loaded yet, or dr is null.</exception>
/// <exception cref="InvalidOperationException">Row shape mismatch or unsupported type code.</exception>
private uint Pop(CUQueue UQueue, ref DataRow dr, ref DataRowState drs)
{
    int n;
    bool b;
    string str;
    byte bData = 0;
    byte bOne = 1;
    if (m_dts == null)
        throw new ArgumentNullException("DataTable header is not de-serialized yet");
    uint nSize = UQueue.GetSize();
    if (dr == null)
        throw new ArgumentNullException("Datarow object can't be null");
    int nLen = m_dts.Length;
    if (dr.ItemArray == null || dr.ItemArray.Length != m_dts.Length)
        throw new InvalidOperationException("Wrong data row object");
    object[] aData = new object[nLen];
    // one null-flag bit per column, rounded up to whole bytes
    uint nBits = (uint)(m_dts.Length / 8 + (((m_dts.Length % 8) != 0) ? 1 : 0));
    byte[] aBit = new byte[nBits];
    UQueue.Pop(out aBit, nBits);
    for (n = 0; n < nLen; n++)
    {
        // refresh the working byte at each 8-column boundary
        if ((n % 8) == 0)
            bData = aBit[n / 8];
        if ((bData & (bOne << (byte)(n % 8))) != 0)
        {
            // bit set: the cell was DBNull and no value was serialized for it
            aData[n] = DBNull.Value;
        }
        else
        {
            switch (m_dts[n])
            {
                case tagDataTypeSupported.dtBoolean: { bool myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtByte: { byte myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtChar: { char myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtDateTime: { DateTime myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtDecimal: { decimal myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtDouble: { double myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtFloat: { float myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtGuid: { Guid myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtUInt16: { ushort myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtUInt32: { uint myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtUInt64: { ulong myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtInt16: { short myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtInt32: { int myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtInt64: { long myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtUDT:
                case tagDataTypeSupported.dtString: { string myData; UQueue.Load(out myData); aData[n] = myData; } break;
                // variant-style payloads are loaded as boxed objects
                case tagDataTypeSupported.dtValue:
                case tagDataTypeSupported.dtValues:
                case tagDataTypeSupported.dtChars:
                case tagDataTypeSupported.dtBytes:
                case tagDataTypeSupported.dtTimeSpan:
                    UQueue.Load(out aData[n]);
                    break;
                default:
                    throw new InvalidOperationException("Unsupported data type for serialization");
            }
        }
    }
    dr.ItemArray = aData;
    // trailer: row state byte, then an optional row-error string
    UQueue.Load(out bData);
    drs = (DataRowState)bData;
    UQueue.Load(out b);
    if (b)
    {
        UQueue.Load(out str);
        dr.RowError = str;
    }
    return (nSize - UQueue.GetSize());
}
/// <summary>
/// Deserializes all remaining rows from the queue into the cached table
/// (m_dt) and replays each row's original DataRowState. Returns the number
/// of rows loaded.
/// </summary>
private int LoadDataTableRows(CUQueue UQueue)
{
    if (UQueue.GetSize() == 0)
    {
        return 0;
    }
    int nSize = 0;
    DataTable dt = m_dt;
    DataRowState drs = DataRowState.Detached;
    //dt.BeginLoadData();
    while (UQueue != null && UQueue.GetSize() > 0)
    {
        DataRow dr = dt.NewRow();
        Pop(UQueue, ref dr, ref drs);
        dt.Rows.Add(dr);
        switch (drs)
        {
            case DataRowState.Added:
                // newly added rows keep their pending-Added state
                break;
            case DataRowState.Unchanged:
                dr.AcceptChanges();
                break;
            case DataRowState.Modified:
                dr.AcceptChanges();
                {
                    // rewrite the first writable column's value so the row
                    // flips back into the Modified state
                    int n;
                    object obj;
                    int nCount = dt.Columns.Count;
                    for (n = 0; n < nCount; ++n)
                    {
                        if (!dt.Columns[n].ReadOnly)
                        {
                            // FIX: the original searched for a writable column at
                            // index n but then rewrote dr[0], which throws when
                            // column 0 is read-only; use the column it found.
                            obj = dr[n];
                            dr[n] = obj;
                            break;
                        }
                    }
                }
                break;
            case DataRowState.Deleted:
                dr.AcceptChanges();
                dr.Delete();
                break;
            default: //DataRowState.Detached
                throw new InvalidOperationException("Wrong DataRow state");
        }
        ++nSize;
    }
    //dt.EndLoadData();
    return nSize;
}
/// <summary>
/// Streams an entire IDataReader to the client: one header request, batched
/// record requests whose size is governed by batchSize (minimum 2048), and a
/// trailing end-of-reader marker. Starts/commits its own batching only when
/// batching was not already active on entry.
/// </summary>
/// <param name="dr">The reader to stream; must not be null.</param>
/// <param name="batchSize">Target bytes per record batch; clamped up to 2048.</param>
/// <returns>
/// The accumulated result of the SendResult calls; on cancellation or socket
/// loss, the last error code (REQUEST_CANCELED / SOCKET_NOT_FOUND) instead.
/// </returns>
/// <exception cref="ArgumentException">dr is null.</exception>
public virtual ulong Send(IDataReader dr, uint batchSize)
{
    uint res;
    ulong nSize = 0;
    bool bSuc;
    if (dr == null)
    {
        throw new ArgumentException("Must pass in a valid data reader interface!");
    }
    using (CScopeUQueue su = new CScopeUQueue())
    {
        CUQueue UQueue = su.UQueue;
        bool bBatching = Batching;
        if (!bBatching)
        {
            bSuc = StartBatching();
        }
        do
        {
            UQueue.SetSize(0);
            m_AdoSerializer.PushHeader(UQueue, dr);
            // enforce a minimum batch size of 2 KB
            if (batchSize < 2048)
            {
                batchSize = 2048;
            }
            UQueue.Save(batchSize);
            nSize = res = SendResult(CAdoSerializationHelper.idDataReaderHeaderArrive, UQueue);
            UQueue.SetSize(0);
            //monitor socket close event and cancel request
            if (res == CClientPeer.REQUEST_CANCELED || res == CClientPeer.SOCKET_NOT_FOUND)
            {
                break;
            }
            while (dr.Read())
            {
                m_AdoSerializer.Push(UQueue, dr);
                // flush a batch once the serialized rows exceed batchSize bytes
                if (UQueue.GetSize() > batchSize)
                {
                    res = SendResult(CAdoSerializationHelper.idDataReaderRecordsArrive, UQueue);
                    UQueue.SetSize(0);
                    //monitor socket close event and cancel request
                    if (res == CClientPeer.REQUEST_CANCELED || res == CClientPeer.SOCKET_NOT_FOUND)
                    {
                        nSize = res;
                        break;
                    }
                    else
                    {
                        nSize += res;
                        if (BytesBatched > 2 * batchSize)
                        {
                            //if we find too much are stored in batch queue, we send them and start a new batching
                            bSuc = CommitBatching();
                            bSuc = StartBatching();
                        }
                    }
                }
            }
            if (UQueue.GetSize() > 0) //remaining
            {
                res = SendResult(CAdoSerializationHelper.idDataReaderRecordsArrive, UQueue);
                UQueue.SetSize(0);
                //monitor socket close event and cancel request
                if (res == CClientPeer.REQUEST_CANCELED || res == CClientPeer.SOCKET_NOT_FOUND)
                {
                    nSize = res;
                    break;
                }
                nSize += res;
            }
        } while (false);
        UQueue.SetSize(0);
        // always terminate the stream, even after an error
        res = SendResult(CAdoSerializationHelper.idEndDataReader);
        //monitor socket close event and cancel request
        if (res == CClientPeer.REQUEST_CANCELED || res == CClientPeer.SOCKET_NOT_FOUND)
        {
            nSize = res;
        }
        else
        {
            nSize += res;
        }
        if (!bBatching)
        {
            bSuc = CommitBatching();
        }
    }
    return (nSize);
}
/// <summary>
/// Reads a ForeignKeyConstraint from the queue. A leading boolean marks a
/// null constraint; otherwise child columns, parent columns, the constraint
/// name and the three rule bytes follow in that exact order.
/// </summary>
/// <param name="UQueue">Source queue positioned at a serialized constraint.</param>
/// <param name="fkc">Receives the constraint, or null per the stream marker.</param>
/// <returns>The number of bytes consumed from the queue.</returns>
private uint Pop(CUQueue UQueue, out ForeignKeyConstraint fkc)
{
    uint startSize = UQueue.GetSize();
    bool isNull;
    UQueue.Load(out isNull);
    if (isNull) //null
    {
        fkc = null;
    }
    else
    {
        DataColumn[] childCols = null;
        Pop(UQueue, ref childCols);
        DataColumn[] parentCols = null;
        Pop(UQueue, ref parentCols);
        fkc = new ForeignKeyConstraint(parentCols, childCols);
        string name;
        UQueue.Load(out name);
        fkc.ConstraintName = name;
        byte ruleCode;
        UQueue.Load(out ruleCode);
        fkc.AcceptRejectRule = (AcceptRejectRule)ruleCode;
        UQueue.Load(out ruleCode);
        fkc.UpdateRule = (Rule)ruleCode;
        UQueue.Load(out ruleCode);
        fkc.DeleteRule = (Rule)ruleCode;
    }
    return (startSize - UQueue.GetSize());
}
/// <summary>
/// Streams a whole DataTable to the peer: one header request, batched row
/// requests whose size is governed by batchSize (minimum 2048), then an
/// end-of-table marker. Uses routee results when RouteeRequest is set and
/// plain requests otherwise. Manages its own batching when none was active
/// on entry and throttles when the sending buffer backs up.
/// </summary>
/// <param name="dt">The table to stream; must not be null.</param>
/// <param name="batchSize">Target bytes per row batch; clamped up to 2048.</param>
/// <returns>True when every send succeeded; false on the first failure.</returns>
/// <exception cref="InvalidOperationException">No attached client socket.</exception>
/// <exception cref="ArgumentNullException">dt is null.</exception>
public virtual bool Send(DataTable dt, uint batchSize)
{
    bool bSuc = false;
    if (AttachedClientSocket == null)
    {
        throw new InvalidOperationException("The asynchronous handler must be attached to an instance of CClientSocket first!");
    }
    if (dt == null)
    {
        throw new ArgumentNullException("Must pass in a valid data table object!");
    }
    bool rr = RouteeRequest;
    bool bBatching = Batching;
    if (!bBatching)
    {
        StartBatching();
    }
    using (CScopeUQueue UQueue = new CScopeUQueue())
    {
        CUQueue AdoUQueue = UQueue.UQueue;
        do
        {
            AdoUQueue.SetSize(0);
            m_AdoSerializer.PushHeader(AdoUQueue, dt, false, false);
            // enforce a minimum batch size of 2 KB
            if (batchSize < 2048)
            {
                batchSize = 2048;
            }
            AdoUQueue.Save(batchSize);
            if (rr)
            {
                bSuc = SendRouteeResult(AdoUQueue, CAdoSerializationHelper.idDataTableHeaderArrive);
            }
            else
            {
                bSuc = SendRequest(CAdoSerializationHelper.idDataTableHeaderArrive, AdoUQueue, m_arh);
            }
            AdoUQueue.SetSize(0);
            if (!bSuc)
            {
                break;
            }
            foreach (DataRow dr in dt.Rows)
            {
                m_AdoSerializer.Push(AdoUQueue, dr);
                // flush a batch once the serialized rows exceed batchSize bytes
                if (AdoUQueue.GetSize() > batchSize)
                {
                    if (rr)
                    {
                        bSuc = SendRouteeResult(AdoUQueue, CAdoSerializationHelper.idDataTableRowsArrive);
                    }
                    else
                    {
                        bSuc = SendRequest(CAdoSerializationHelper.idDataTableRowsArrive, AdoUQueue, m_arh);
                    }
                    AdoUQueue.SetSize(0);
                    if (!bSuc)
                    {
                        break;
                    }
                    if (AttachedClientSocket.BytesBatched > 2 * batchSize)
                    {
                        //if we find too much are stored in batch queue, we send them and start a new batching
                        CommitBatching(true);
                        StartBatching();
                    }
                    uint nBytesInSendBuffer = AttachedClientSocket.BytesInSendingBuffer;
                    if (nBytesInSendBuffer > 6 * CAdoSerializationHelper.DEFAULT_BATCH_SIZE) //60k
                    {
                        CommitBatching(true);
                        //if we find there are too much data in sending buffer, we wait until all of data are sent and processed.
                        WaitAll();
                        StartBatching();
                    }
                }
            }
            if (!bSuc)
            {
                break;
            }
            if (AdoUQueue.GetSize() > 0)
            {
                // flush any rows remaining below the batch threshold
                if (rr)
                {
                    bSuc = SendRouteeResult(AdoUQueue, CAdoSerializationHelper.idDataTableRowsArrive);
                }
                else
                {
                    bSuc = SendRequest(CAdoSerializationHelper.idDataTableRowsArrive, AdoUQueue, m_arh);
                }
                AdoUQueue.SetSize(0);
            }
            if (!bSuc)
            {
                break;
            }
        } while (false);
        if (bSuc)
        {
            // terminate the stream only after a fully successful transfer
            if (rr)
            {
                SendRouteeResult(CAdoSerializationHelper.idEndDataTable);
            }
            else
            {
                SendRequest(CAdoSerializationHelper.idEndDataTable, m_arh);
            }
        }
        if (!bBatching)
        {
            CommitBatching(true);
        }
    }
    return (bSuc);
}
/// <summary>
/// Reads an array of DataColumn definitions from the queue. A serialized
/// count of -1 denotes a null array; otherwise the array is (re)allocated
/// to the exact count and each element is deserialized in order.
/// </summary>
/// <param name="UQueue">Source queue positioned at a serialized column array.</param>
/// <param name="dcs">Receives the array, or null when the stream says so.</param>
/// <returns>The number of bytes consumed from the queue.</returns>
private uint Pop(CUQueue UQueue, ref DataColumn[] dcs)
{
    uint before = UQueue.GetSize();
    int count;
    UQueue.Load(out count);
    if (count == -1)
    {
        dcs = null;
    }
    else
    {
        if (dcs == null || dcs.Length != count)
        {
            dcs = new DataColumn[count];
        }
        for (int i = 0; i < count; i++)
        {
            Pop(UQueue, ref dcs[i]);
        }
    }
    return (before - UQueue.GetSize());
}
/// <summary>
/// Serializes Message into a pooled queue and delivers it to the user
/// identified by UserId through the native server core.
/// </summary>
/// <param name="Message">The object to serialize and deliver.</param>
/// <param name="UserId">The target user's identifier.</param>
/// <returns>The boolean result reported by the native SendUserMessage call.</returns>
public bool SendUserMessage(object Message, string UserId)
{
    using (CScopeUQueue scope = new CScopeUQueue())
    {
        CUQueue buffer = scope.UQueue;
        buffer.Save(Message);
        unsafe
        {
            // pin the serialized bytes so the native call can read them directly
            fixed (byte* bytes = buffer.m_bytes)
            {
                return ServerCoreLoader.SendUserMessage(m_sp.Handle, UserId, bytes, buffer.GetSize());
            }
        }
    }
}
/// <summary>
/// Reads a DataColumnCollection from the queue. A leading boolean marks a
/// null collection; otherwise the collection is cleared and repopulated with
/// the serialized count of columns.
/// </summary>
/// <param name="UQueue">Source queue positioned at a serialized collection.</param>
/// <param name="Cols">The collection to fill; set to null per the stream marker.</param>
/// <returns>The number of bytes consumed from the queue.</returns>
private uint Pop(CUQueue UQueue, ref DataColumnCollection Cols)
{
    uint before = UQueue.GetSize();
    bool isNull = false;
    UQueue.Load(out isNull);
    if (isNull)
    {
        Cols = null;
    }
    else
    {
        // NOTE(review): assumes Cols is non-null whenever the stream marks the
        // collection as present — confirm against callers.
        Cols.Clear();
        int count;
        UQueue.Load(out count);
        for (int i = 0; i < count; i++)
        {
            DataColumn col = null;
            Pop(UQueue, ref col);
            Cols.Add(col);
        }
    }
    return (before - UQueue.GetSize());
}
/// <summary>
/// Deserializes a DataSet header (affected count, flag byte, name, namespace
/// and prefix) from the queue, caches the new DataSet in m_ds and returns it.
/// Returns null when the queue is empty.
/// </summary>
DataSet LoadDataSetHeader(CUQueue UQueue)
{
    if (UQueue.GetSize() == 0)
    {
        return null;
    }
    DataSet ds = new DataSet();
    m_ds = ds;
    UQueue.Load(out m_nAffected);
    // bits 2/4 of the flag byte carry the two dataset booleans
    byte flags;
    UQueue.Load(out flags);
    ds.CaseSensitive = ((flags & 2) == 2);
    ds.EnforceConstraints = ((flags & 4) == 4);
    string text;
    UQueue.Load(out text);
    ds.DataSetName = text;
    UQueue.Load(out text);
    ds.Namespace = text;
    UQueue.Load(out text);
    ds.Prefix = text;
    return ds;
}
/// <summary>
/// Reads a UniqueConstraint from the queue: columns, constraint name and the
/// primary-key flag, in that order, unless the leading boolean marks null.
/// </summary>
/// <param name="UQueue">Source queue positioned at a serialized constraint.</param>
/// <param name="uc">Receives a new constraint when one is present.</param>
/// <returns>The number of bytes consumed from the queue.</returns>
private uint Pop(CUQueue UQueue, ref UniqueConstraint uc)
{
    uint before = UQueue.GetSize();
    bool isNull = false;
    UQueue.Load(out isNull);
    if (!isNull)
    {
        DataColumn[] cols = null;
        Pop(UQueue, ref cols);
        string name;
        UQueue.Load(out name);
        bool isPrimaryKey;
        UQueue.Load(out isPrimaryKey);
        uc = new UniqueConstraint(name, cols, isPrimaryKey);
    }
    // NOTE(review): uc is left untouched when the stream marks a null
    // constraint — confirm callers rely on this ref semantics.
    return before - UQueue.GetSize();
}
/// <summary>
/// Clears and repopulates the current DataSet's Relations collection from
/// the queue. Returns the collection, or null when the queue is null/empty.
/// </summary>
private DataRelationCollection LoadDataSetRelations(CUQueue UQueue)
{
    if (UQueue == null || UQueue.GetSize() == 0)
    {
        return null;
    }
    DataRelationCollection relations = CurrentDataSet.Relations;
    relations.Clear();
    Pop(UQueue, ref relations);
    return relations;
}
/// <summary>
/// Handles the server-to-client streaming chunk request: appends the payload
/// to the local stream under lock, raises the Progress event with the new
/// stream position, and clears the queue. Returns true only when a non-empty
/// chunk for the expected request id was consumed.
/// </summary>
private bool DataFromServerToClient(CAsyncServiceHandler sender, ushort reqId, CUQueue qData)
{
    if (reqId != CStreamSerializationHelper.idReadDataFromServerToClient)
    {
        return false;
    }
    if (qData.GetSize() == 0)
    {
        return false;
    }
    lock (m_cs)
    {
        CStreamSerializationHelper.Write(m_s, qData);
        if (Progress != null)
        {
            Progress.Invoke(this, (ulong)m_s.Position);
        }
    }
    qData.SetSize(0);
    return true;
}