/// <summary>
/// Query queue keys opened at server side
/// </summary>
/// <param name="gk">A callback for tracking a list of key names</param>
/// <param name="discarded">a callback for tracking cancel or socket closed event</param>
/// <returns>true for sending the request successfully, and false for failure</returns>
public virtual bool GetKeys(DGetKeys gk, DDiscarded discarded)
{
    return SendRequest(idGetKeys, (ar) =>
    {
        CUQueue buffer = ar.UQueue;
        if (gk == null)
        {
            //no callback registered -- just drop the payload
            buffer.SetSize(0);
            return;
        }
        uint count;
        buffer.Load(out count);
        string[] keys = new string[count];
        for (uint i = 0; i < count; ++i)
        {
            byte[] raw;
            buffer.Load(out raw);
            if (raw != null)
            {
                //key names travel as UTF-8 byte arrays
                keys[i] = Encoding.UTF8.GetString(raw, 0, raw.Length);
            }
        }
        gk((CAsyncQueue)ar.AsyncServiceHandler, keys);
    }, discarded, (DOnExceptionFromServer)null);
}
/// <summary>
/// Dispatch an incoming pub/sub (chat) request held in m_qBuffer to the matching virtual
/// handler (OnSubscribe, OnUnsubscribe, OnSendUserMessage or OnPublish). An unrecognized
/// request id is reported back to the client as a server exception result.
/// </summary>
/// <param name="chatRequestID">The chat request id to dispatch</param>
internal virtual void OnChatComing(tagChatRequestID chatRequestID)
{
    uint svsId = ServerCoreLoader.GetSvsID(m_sh);
    CUQueue q = m_qBuffer;
    if (svsId != BaseServiceID.sidHTTP)
    {
        //non-HTTP peers may use a different byte order/OS; align the buffer before de-serializing
        bool endian = false;
        tagOperationSystem os = ServerCoreLoader.GetPeerOs(m_sh, ref endian);
        q.Endian = endian;
        q.OS = os;
    }
    switch (chatRequestID)
    {
        case tagChatRequestID.idEnter:
            {
                object objGroups;
                q.Load(out objGroups);
                OnSubscribe((uint[])objGroups);
            }
            break;
        case tagChatRequestID.idExit:
            {
                OnUnsubscribe(ChatGroups);
            }
            break;
        case tagChatRequestID.idSendUserMessage:
            {
                object msg;
                string user;
                q.Load(out user).Load(out msg);
                OnSendUserMessage(user, msg);
            }
            break;
        case tagChatRequestID.idSpeak:
            {
                object msg;
                object groups;
                q.Load(out groups).Load(out msg);
                OnPublish(msg, (uint[])groups);
            }
            break;
        default:
            //unknown chat request -- report it back to the client as a server exception
            ServerCoreLoader.SendExceptionResult(m_sh, "Unexpected chat request", Environment.StackTrace, (ushort)chatRequestID, 0);
            break;
    }
}
/// <summary>
/// Clear this container, then de-serialize a count-prefixed sequence of 64-bit
/// integers from the given buffer.
/// </summary>
/// <param name="UQueue">Source buffer positioned at a serialized array</param>
public void LoadFrom(CUQueue UQueue)
{
    Clear();
    int count;
    UQueue.Load(out count);
    for (int i = 0; i < count; ++i)
    {
        long item;
        UQueue.Load(out item);
        Add(item);
    }
}
/// <summary>
/// Routee handler for idComputePi: numerically integrate 4/(1+x*x) over the assigned
/// sub-interval with the midpoint rule and route the partial sum back to the requester
/// together with the interval start.
/// </summary>
/// <param name="sRequestId">Routed request id</param>
/// <param name="UQueue">Buffer holding dStart, dStep and the number of sample points</param>
protected override void OnResultReturned(ushort sRequestId, CUQueue UQueue)
{
    if (RouteeRequest)
    {
        switch (sRequestId)
        {
            case piConst.idComputePi:
                {
                    double dStart;
                    double dStep;
                    int nNum;
                    UQueue.Load(out dStart).Load(out dStep).Load(out nNum);
                    double dX = dStart + dStep / 2; //first midpoint of the sub-interval
                    double dd = dStep * 4.0;
                    double ComputeRtn = 0.0;
                    for (int n = 0; n < nNum; n++)
                    {
                        //BUG FIX: accumulate at the current midpoint BEFORE advancing; the
                        //original advanced dX first, which skipped the first midpoint and
                        //sampled one step past the end of the assigned sub-interval.
                        ComputeRtn += dd / (1 + dX * dX);
                        dX += dStep;
                    }
                    SendRouteeResult(ComputeRtn, dStart);
                }
                break;
            default:
                break;
        }
    }
}
/// <summary>
/// Handle idQueryMaxMinAvgs: compute MAX/MIN/AVG of payment.amount on a slave database
/// and send the result triplet back to the front peer through SendResultIndex.
/// </summary>
/// <param name="q">Request buffer holding an optional WHERE-clause filter string</param>
/// <param name="reqIndex">Index pairing this result with the original request</param>
void QueryPaymentMaxMinAvgs(CUQueue q, ulong reqIndex)
{
    uint ret;
    string filter;
    q.Load(out filter);
    //assuming slave pool has queue name set (request backup)
    System.Diagnostics.Debug.Assert(CYourServer.Slave.QueueName.Length > 0);
    ss.CMaxMinAvg pmma = new ss.CMaxMinAvg();
    string sql = "SELECT MAX(amount),MIN(amount),AVG(amount) FROM payment";
    //NOTE(review): the client-supplied filter is concatenated straight into the SQL text,
    //which is an injection risk if this service can be reached by untrusted clients --
    //confirm callers are trusted or sanitize/parameterize the filter.
    if (filter != null && filter.Length > 0)
        sql += (" WHERE " + filter);
    var handler = CYourServer.Slave.SeekByQueue();
    if (handler == null)
    {
        ret = SendResultIndex(reqIndex, ss.Consts.idQueryMaxMinAvgs, (int)-1, "No connection to a slave database", pmma);
        return;
    }
    ulong peer_handle = Handle;
    bool ok = handler.Execute(sql, (h, r, err, affected, fail_ok, vtId) =>
    {
        //send result if front peer not closed yet
        if (peer_handle == Handle)
            ret = SendResultIndex(reqIndex, ss.Consts.idQueryMaxMinAvgs, r, err, pmma);
    }, (h, vData) =>
    {
        //single aggregate row: MAX, MIN, AVG in column order
        pmma.Max = double.Parse(vData[0].ToString());
        pmma.Min = double.Parse(vData[1].ToString());
        pmma.Avg = double.Parse(vData[2].ToString());
    });
    //should always be true because slave pool has queue name set for request backup
    System.Diagnostics.Debug.Assert(ok);
}
/// <summary>
/// Handle idGetRentalDateTimes: fetch the three timestamps of one rental row from a slave
/// database and send them back to the front peer through SendResultIndex.
/// </summary>
/// <param name="q">Request buffer holding the rental_id to look up</param>
/// <param name="reqIndex">Index pairing this result with the original request</param>
void GetRentalDateTimes(CUQueue q, ulong reqIndex)
{
    uint ret;
    long rental_id;
    q.Load(out rental_id);
    //assuming slave pool has queue name set (request backup)
    System.Diagnostics.Debug.Assert(CYourServer.Slave.QueueName.Length > 0);
    ss.CRentalDateTimes myDates = new ss.CRentalDateTimes();
    //rental_id is a long, so concatenating it into the SQL text cannot inject
    string sql = "SELECT rental_id,rental_date,return_date,last_update FROM rental where rental_id=" + rental_id;
    var handler = CYourServer.Slave.SeekByQueue();
    if (handler == null)
    {
        ret = SendResultIndex(reqIndex, ss.Consts.idGetRentalDateTimes, myDates, (int)-1, "No connection to a slave database");
        return;
    }
    ulong peer_handle = Handle;
    bool ok = handler.Execute(sql, (h, res, errMsg, affected, fail_ok, vtId) =>
    {
        //send result if front peer not closed yet
        if (peer_handle == Handle)
        {
            ret = SendResultIndex(reqIndex, ss.Consts.idGetRentalDateTimes, myDates, res, errMsg);
        }
    }, (h, vData) =>
    {
        myDates.rental_id = long.Parse(vData[0].ToString());
        myDates.Rental = (DateTime)vData[1];
        myDates.Return = (DateTime)vData[2];
        myDates.LastUpdate = (DateTime)vData[3];
    });
    //should always be true because slave pool has queue name set for request backup
    System.Diagnostics.Debug.Assert(ok);
}
/// <summary>
/// Routee handler for idComputePi: numerically integrate 4/(1+x*x) over the assigned
/// sub-interval with the midpoint rule and route the partial sum back to the requester.
/// </summary>
/// <param name="sRequestId">Routed request id</param>
/// <param name="UQueue">Buffer holding dStart, dStep and the number of sample points</param>
protected override void OnResultReturned(ushort sRequestId, CUQueue UQueue)
{
    if (RouteeRequest)
    {
        switch (sRequestId)
        {
            case piConst.idComputePi:
                {
                    double dStart;
                    double dStep;
                    int nNum;
                    UQueue.Load(out dStart).Load(out dStep).Load(out nNum);
                    double dX = dStart + dStep / 2; //first midpoint of the sub-interval
                    double dd = dStep * 4.0;
                    double ComputeRtn = 0.0;
                    for (int n = 0; n < nNum; n++)
                    {
                        //BUG FIX: accumulate at the current midpoint BEFORE advancing; the
                        //original advanced dX first, which skipped the first midpoint and
                        //sampled one step past the end of the assigned sub-interval.
                        ComputeRtn += dd / (1 + dX * dX);
                        dX += dStep;
                    }
                    SendRouteeResult(ComputeRtn);
                }
                break;
            default:
                break;
        }
    }
}
/// <summary>
/// Finish assembling a large object: absorb the final chunk into m_Blob, then
/// de-serialize the completed value and append it to the parameter list.
/// </summary>
private void EndBLOB()
{
    Chunk();
    object value;
    m_Blob.Load(out value);
    m_vParam.Add(value);
}
/// <summary>
/// Drain every remaining value from the incoming buffer and append each one to the
/// accumulated parameter list.
/// </summary>
private void Transferring()
{
    CUQueue incoming = UQueue;
    while (incoming.GetSize() > 0)
    {
        object value;
        incoming.Load(out value);
        m_vParam.Add(value);
    }
}
/// <summary>
/// Handle queue-service notifications: idClose/idEnqueue replies carry no payload of
/// interest and are discarded; idBatchSizeNotified updates the cached batch size.
/// Any other request id is ignored.
/// </summary>
/// <param name="reqId">Request id of the returned result</param>
/// <param name="mc">Buffer holding the result payload</param>
protected override void OnResultReturned(ushort reqId, CUQueue mc)
{
    if (reqId == idClose || reqId == idEnqueue)
    {
        mc.SetSize(0);
    }
    else if (reqId == idBatchSizeNotified)
    {
        mc.Load(out m_nBatchSize);
    }
}
//make sure both serialization and de-serialization match against each other.
/// <summary>
/// De-serialize every member in the exact order it was saved.
/// </summary>
/// <param name="UQueue">Source buffer positioned at the serialized object</param>
public void LoadFrom(CUQueue UQueue)
{
    UQueue.Load(out NullString);
    UQueue.Load(out ObjectNull);
    UQueue.Load(out ADateTime);
    UQueue.Load(out ADouble);
    UQueue.Load(out ABool);
    UQueue.Load(out UnicodeString); //UTF16-lowendian
    UQueue.Load(out AsciiString);
    UQueue.Load(out ObjBool);
    UQueue.Load(out ObjString); //UTF16-lowendian
    UQueue.Load(out objArrString); //UTF16-lowendian
    UQueue.Load(out objArrInt);
}
/// <summary>
/// Handle idSQLGetInfo by rebuilding the cached info map from the returned
/// (info type, value) pairs; everything else goes to the base handler.
/// </summary>
/// <param name="reqId">Request id of the returned result</param>
/// <param name="mc">Buffer holding the result payload</param>
protected override void OnResultReturned(ushort reqId, CUQueue mc)
{
    if (reqId != idSQLGetInfo)
    {
        base.OnResultReturned(reqId, mc);
        return;
    }
    lock (m_csDB)
    {
        m_mapInfo.Clear();
        while (mc.GetSize() > 0)
        {
            ushort key;
            mc.Load(out key);
            object value;
            mc.Load(out value);
            m_mapInfo[key] = value;
        }
    }
}
/// <summary>
/// Route one returned result to the right consumer, in priority order: (1) an idInterrupt
/// base request goes straight to OnInterrupted; (2) the async handler registered when the
/// request was sent; (3) the first DOnResultReturned in m_lstRR that claims the result;
/// (4) the virtual OnResultReturned fallback.
/// </summary>
/// <param name="reqId">Request id of the returned result</param>
/// <param name="mc">Buffer holding the result payload</param>
internal void onRR(ushort reqId, CUQueue mc)
{
    if (tagBaseRequestID.idInterrupt == (tagBaseRequestID)reqId)
    {
        ulong options;
        mc.Load(out options);
        OnInterrupted(options);
        return;
    }
    MyKeyValue<ushort, CResultCb> p = GetAsyncResultHandler(reqId);
    do
    {
        if (p != null && p.Value != null && p.Value.AsyncResultHandler != null)
        {
            //a per-request callback was registered when the request was sent;
            //m_ar is reused to avoid an allocation per result
            m_ar.Reset(reqId, mc, p.Value.AsyncResultHandler);
            p.Value.AsyncResultHandler.Invoke(m_ar);
            break;
        }
        bool processed = false;
        lock (m_cs)
        {
            foreach (DOnResultReturned r in m_lstRR)
            {
                if (r.Invoke(this, reqId, mc))
                {
                    processed = true;
                    break;
                }
            }
        }
        if (processed)
        {
            break;
        }
        OnResultReturned(reqId, mc);
    } while (false); //do/while(false) gives the breaks above a single exit point
}
/// <summary>
/// Handle the idDBUpdate notification: de-serialize the database change event
/// (event type, instance, database path, table path and row id) and raise DBEvent.
/// Any other request id is passed to the base handler.
/// </summary>
/// <param name="reqId">Request id of the returned result</param>
/// <param name="mc">Buffer holding the result payload</param>
protected override void OnResultReturned(ushort reqId, CUQueue mc)
{
    if (reqId != idDBUpdate)
    {
        base.OnResultReturned(reqId, mc);
        return;
    }
    if (mc.GetSize() > 0)
    {
        int eventType;
        string instance;
        string dbPath;
        string tablePath;
        object rowId;
        mc.Load(out eventType).Load(out instance).Load(out dbPath).Load(out tablePath).Load(out rowId);
        if (DBEvent != null)
        {
            DBEvent(this, (tagUpdateEvent)eventType, instance, dbPath, tablePath, rowId);
        }
    }
}
/// <summary>
/// Handle idUploadEmployees: insert (CompanyId, Name, JoinDate) records into
/// mysample.EMPLOYEE within one transaction on a locked master connection, collecting one
/// auto id per inserted row (-1 for a failed row) and replying with the first error seen.
/// The locked handler is returned to the pool only after all requests were queued
/// successfully; otherwise the socket's error state is reported instead.
/// </summary>
/// <param name="q">Request buffer holding a flat value array, three values per record</param>
/// <param name="reqIndex">Index pairing this result with the original request</param>
void UploadEmployees(CUQueue q, ulong reqIndex)
{
    uint ret;
    KeyValuePair<int, string> error = new KeyValuePair<int, string>();
    ss.CInt64Array vId = new ss.CInt64Array();
    CDBVariantArray vData;
    q.Load(out vData);
    if (vData.Count == 0)
    {
        ret = SendResultIndex(reqIndex, ss.Consts.idUploadEmployees, (int)0, "", vId);
        return;
    }
    else if ((vData.Count % 3) != 0)
    {
        ret = SendResultIndex(reqIndex, ss.Consts.idUploadEmployees, (int)-1, "Data array size is wrong", vId);
        return;
    }
    //use master for insert, update and delete
    //use Lock and Unlock to avoid SQL stream overlap on a session within a multi-thread environment
    var handler = CYourServer.Master.Lock();
    if (handler == null)
    {
        ret = SendResultIndex(reqIndex, ss.Consts.idUploadEmployees, (int)-2, "No connection to a master database", vId);
        return;
    }
    CClientSocket cs = handler.AttachedClientSocket;
    do
    {
        if (!handler.BeginTrans() || !handler.Prepare("INSERT INTO mysample.EMPLOYEE(CompanyId,Name,JoinDate)VALUES(?,?,?)"))
        {
            break;
        }
        bool ok = true;
        CDBVariantArray v = new CDBVariantArray();
        int rows = vData.Count / 3;
        for (int n = 0; n < rows; ++n)
        {
            v.Add(vData[n * 3 + 0]);
            v.Add(vData[n * 3 + 1]);
            v.Add(vData[n * 3 + 2]);
            ok = handler.Execute(v, (h, r, err, affected, fail_ok, vtId) =>
            {
                if (r != 0)
                {
                    //keep only the first error seen for the final reply
                    if (error.Key == 0)
                    {
                        error = new KeyValuePair<int, string>(r, err);
                    }
                    vId.Add(-1);
                }
                else
                {
                    vId.Add(long.Parse(vtId.ToString()));
                }
            });
            if (!ok)
            {
                break;
            }
            v.Clear();
        }
        if (!ok)
        {
            break;
        }
        ulong peer_handle = Handle;
        if (!handler.EndTrans(tagRollbackPlan.rpRollbackErrorAll, (h, res, errMsg) =>
        {
            //send result if front peer not closed yet
            if (peer_handle == Handle)
            {
                if (res != 0 && error.Key == 0)
                {
                    error = new KeyValuePair<int, string>(res, errMsg);
                }
                ret = SendResultIndex(reqIndex, ss.Consts.idUploadEmployees, error.Key, error.Value, vId);
            }
        }, (h, canceled) =>
        {
            //send error message if front peer not closed yet
            if (peer_handle == Handle)
            {
                //socket closed after requests are put on wire
                if (error.Key == 0)
                {
                    error = new KeyValuePair<int, string>(cs.ErrorCode, cs.ErrorMsg);
                }
                ret = SendResultIndex(reqIndex, ss.Consts.idUploadEmployees, error.Key, error.Value, vId);
            }
        }))
        {
            break;
        }
        //put handler back into pool as soon as possible for reuse as long as socket connection is not closed yet
        CYourServer.Master.Unlock(handler);
        return;
    } while (false);
    ret = SendResultIndex(reqIndex, ss.Consts.idUploadEmployees, cs.ErrorCode, cs.ErrorMsg, vId);
}
/// <summary>
/// De-serialize the rental id and its three timestamps in the exact order they were saved.
/// </summary>
/// <param name="UQueue">Source buffer positioned at the serialized object</param>
public void LoadFrom(CUQueue UQueue)
{
    UQueue.Load(out rental_id);
    UQueue.Load(out Rental);
    UQueue.Load(out Return);
    UQueue.Load(out LastUpdate);
}
/// <summary>
/// De-serialize the three aggregates (Max, Min, Avg) in the exact order they were saved.
/// </summary>
/// <param name="UQueue">Source buffer positioned at the serialized object</param>
public void LoadFrom(CUQueue UQueue)
{
    UQueue.Load(out Max);
    UQueue.Load(out Min);
    UQueue.Load(out Avg);
}
/// <summary>
/// De-serialize one DataRow saved by the matching push routine: a null bitmap (one bit per
/// column, set = DBNull), each non-null value in the column's declared tagDataTypeSupported
/// type, the row state byte, and an optional row-error string.
/// </summary>
/// <param name="UQueue">Source buffer</param>
/// <param name="dr">Row whose ItemArray/RowError are filled in; must match the table header</param>
/// <param name="drs">Receives the serialized DataRowState</param>
/// <returns>The number of bytes consumed from the buffer</returns>
private uint Pop(CUQueue UQueue, ref DataRow dr, ref DataRowState drs)
{
    int n;
    bool b;
    string str;
    byte bData = 0;
    byte bOne = 1;
    if (m_dts == null)
        throw new ArgumentNullException("DataTable header is not de-serialized yet");
    uint nSize = UQueue.GetSize();
    if (dr == null)
        throw new ArgumentNullException("Datarow object can't be null");
    int nLen = m_dts.Length;
    if (dr.ItemArray == null || dr.ItemArray.Length != m_dts.Length)
        throw new InvalidOperationException("Wrong data row object");
    object[] aData = new object[nLen];
    //one bit per column marks a DBNull cell; bitmap is rounded up to whole bytes
    uint nBits = (uint)(m_dts.Length / 8 + (((m_dts.Length % 8) != 0) ? 1 : 0));
    byte[] aBit = new byte[nBits];
    UQueue.Pop(out aBit, nBits);
    for (n = 0; n < nLen; n++)
    {
        if ((n % 8) == 0)
            bData = aBit[n / 8];
        if ((bData & (bOne << (byte)(n % 8))) != 0)
        {
            aData[n] = DBNull.Value;
        }
        else
        {
            //non-null cell: read it using the column's declared wire type
            switch (m_dts[n])
            {
                case tagDataTypeSupported.dtBoolean: { bool myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtByte: { byte myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtChar: { char myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtDateTime: { DateTime myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtDecimal: { decimal myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtDouble: { double myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtFloat: { float myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtGuid: { Guid myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtUInt16: { ushort myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtUInt32: { uint myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtUInt64: { ulong myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtInt16: { short myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtInt32: { int myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtInt64: { long myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtUDT:
                case tagDataTypeSupported.dtString: { string myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtValue:
                case tagDataTypeSupported.dtValues:
                case tagDataTypeSupported.dtChars:
                case tagDataTypeSupported.dtBytes:
                case tagDataTypeSupported.dtTimeSpan:
                    //variant-like types are carried as boxed objects
                    UQueue.Load(out aData[n]);
                    break;
                default:
                    throw new InvalidOperationException("Unsupported data type for serialization");
            }
        }
    }
    dr.ItemArray = aData;
    UQueue.Load(out bData);
    drs = (DataRowState)bData;
    UQueue.Load(out b);
    if (b)
    {
        //a row-error string was serialized after the state byte
        UQueue.Load(out str);
        dr.RowError = str;
    }
    return (nSize - UQueue.GetSize());
}
/// <summary>
/// Client-side state machine for streaming file exchange with the remote file service.
/// Download replies write incoming chunks into the local file of the front context in
/// m_vContext; upload replies pump STREAM_CHUNK_SIZE blocks from the local file, optionally
/// mirrored into the client message queue for replay (request backup). Error codes come
/// either from the server reply or from local file IO (HResult unless NO_HRESULT is
/// defined). A finished or failed context is removed from the front of m_vContext and
/// OnPostProcessing(0, 0) then advances any queued transfer.
/// </summary>
/// <param name="reqId">Request id of the returned result</param>
/// <param name="mc">Buffer holding the result payload</param>
protected override void OnResultReturned(ushort reqId, CUQueue mc)
{
    switch (reqId)
    {
        case idDownload:
            {
                int res;
                string errMsg;
                mc.Load(out res).Load(out errMsg);
                DDownload dl = null;
                lock (m_csFile)
                {
                    if (m_vContext.Count > 0)
                    {
                        CContext ctx = m_vContext[0];
                        ctx.ErrCode = res;
                        ctx.ErrMsg = errMsg;
                        dl = ctx.Download;
                    }
                }
                //invoke the user callback outside the lock
                if (dl != null)
                {
                    dl.Invoke(this, res, errMsg);
                }
                lock (m_csFile)
                {
                    if (m_vContext.Count > 0)
                    {
                        CloseFile(m_vContext.RemoveFromFront());
                    }
                }
                OnPostProcessing(0, 0);
            }
            break;
        case idStartDownloading:
            lock (m_csFile)
            {
                long fileSize;
                string localFile, remoteFile;
                uint flags;
                long initSize;
                mc.Load(out fileSize).Load(out localFile).Load(out remoteFile).Load(out flags).Load(out initSize);
                lock (m_csFile) //reentrant inner lock kept from the original
                {
                    if (m_vContext.Count == 0)
                    {
                        //no local context -- the transfer was replayed from the client queue; rebuild it
                        CContext ctx = new CContext(false, flags);
                        ctx.LocalFile = localFile;
                        ctx.FilePath = remoteFile;
                        OpenLocalWrite(ctx);
                        ctx.InitSize = initSize;
                        m_vContext.AddToBack(ctx);
                    }
                    CContext context = m_vContext[0];
                    context.FileSize = fileSize;
                    initSize = (context.InitSize > 0) ? context.InitSize : 0;
                    if (context.File.Position > initSize)
                    {
                        //truncate back to the original starting point before re-downloading
                        context.File.SetLength(initSize);
                    }
                }
            }
            break;
        case idDownloading:
            {
                long downloaded = 0;
                DTransferring trans = null;
                CContext context = null;
                lock (m_csFile)
                {
                    if (m_vContext.Count > 0)
                    {
                        context = m_vContext[0];
                        trans = context.Transferring;
                        byte[] buffer = mc.IntenalBuffer;
                        try
                        {
                            context.File.Write(buffer, 0, (int)mc.GetSize());
                            long initSize = (context.InitSize > 0) ? context.InitSize : 0;
                            downloaded = context.File.Position - initSize;
                        }
                        catch (System.IO.IOException err)
                        {
                            context.ErrMsg = err.Message;
#if NO_HRESULT
                            context.ErrCode = CANNOT_OPEN_LOCAL_FILE_FOR_WRITING;
#else
                            context.ErrCode = err.HResult;
#endif
                        }
                    }
                }
                mc.SetSize(0);
                if (context != null && context.HasError)
                {
                    if (context.Download != null)
                    {
                        context.Download.Invoke(this, context.ErrCode, context.ErrMsg);
                    }
                    CloseFile(m_vContext.RemoveFromFront());
                    OnPostProcessing(0, 0);
                }
                else if (trans != null)
                {
                    trans.Invoke(this, downloaded);
                }
            }
            break;
        case idUploadBackup:
            //queue-replay marker only; nothing to do on the client side
            break;
        case idUpload:
            {
                CContext context = null;
                int res;
                string errMsg;
                mc.Load(out res).Load(out errMsg);
                if (res != 0 || (errMsg != null && errMsg.Length > 0))
                {
                    //server refused the upload -- record the error on the front context
                    lock (m_csFile)
                    {
                        if (m_vContext.Count > 0)
                        {
                            context = m_vContext[0];
                            mc.Load(out context.InitSize);
                            context.ErrCode = res;
                            context.ErrMsg = errMsg;
                        }
                    }
                }
                else
                {
                    CClientSocket cs = Socket;
                    lock (m_csFile)
                    {
                        if (m_vContext.Count > 0)
                        {
                            context = m_vContext[0];
                            mc.Load(out context.InitSize);
                            using (CScopeUQueue sb = new CScopeUQueue())
                            {
                                DAsyncResultHandler rh = null;
                                DOnExceptionFromServer se = null;
                                if (sb.UQueue.MaxBufferSize < STREAM_CHUNK_SIZE)
                                {
                                    sb.UQueue.Realloc(STREAM_CHUNK_SIZE);
                                }
                                byte[] buffer = sb.UQueue.IntenalBuffer;
                                try
                                {
                                    context.QueueOk = cs.ClientQueue.StartJob();
                                    bool queue_enabled = cs.ClientQueue.Available;
                                    if (queue_enabled)
                                    {
                                        //record the upload header so the client queue can replay it
                                        SendRequest(idUploadBackup, context.FilePath, context.Flags, context.FileSize, context.InitSize, rh, context.Discarded, se);
                                    }
                                    int ret = context.File.Read(buffer, 0, (int)STREAM_CHUNK_SIZE);
                                    while (ret == STREAM_CHUNK_SIZE)
                                    {
                                        if (!SendRequest(idUploading, buffer, (uint)ret, rh, context.Discarded, se))
                                        {
                                            context.ErrCode = cs.ErrorCode;
                                            context.ErrMsg = cs.ErrorMsg;
                                            break;
                                        }
                                        ret = context.File.Read(buffer, 0, (int)STREAM_CHUNK_SIZE);
                                        if (queue_enabled)
                                        {
                                            //save file into client message queue
                                        }
                                        else if (cs.BytesInSendingBuffer > 40 * STREAM_CHUNK_SIZE)
                                        {
                                            //throttle: let the socket drain before pumping more chunks
                                            break;
                                        }
                                    }
                                    if (ret > 0 && !context.HasError)
                                    {
                                        if (!SendRequest(idUploading, buffer, (uint)ret, rh, context.Discarded, se))
                                        {
                                            context.ErrCode = cs.ErrorCode;
                                            context.ErrMsg = cs.ErrorMsg;
                                        }
                                    }
                                    if (ret < STREAM_CHUNK_SIZE && !context.HasError)
                                    {
                                        context.Sent = true;
                                        SendRequest(idUploadCompleted, rh, context.Discarded, se);
                                        if (context.QueueOk)
                                        {
                                            Socket.ClientQueue.EndJob();
                                        }
                                    }
                                }
                                catch (System.IO.IOException err)
                                {
                                    errMsg = err.Message;
#if NO_HRESULT
                                    res = CANNOT_OPEN_LOCAL_FILE_FOR_READING;
#else
                                    res = err.HResult;
#endif
                                    context.ErrCode = res;
                                    context.ErrMsg = errMsg;
                                }
                            }
                        }
                    }
                }
                if (context != null && context.HasError)
                {
                    if (context.Upload != null)
                    {
                        context.Upload.Invoke(this, context.ErrCode, context.ErrMsg);
                    }
                    lock (m_csFile)
                    {
                        CloseFile(m_vContext.RemoveFromFront());
                    }
                    if (context.QueueOk)
                    {
                        Socket.ClientQueue.AbortJob();
                    }
                    OnPostProcessing(0, 0);
                }
            }
            break;
        case idUploading:
            {
                int errCode = 0;
                string errMsg = "";
                CContext context = null;
                DTransferring trans = null;
                long uploaded;
                mc.Load(out uploaded);
                if (mc.GetSize() >= 8)
                {
                    //optional trailing error pair from the server
                    mc.Load(out errCode).Load(out errMsg);
                }
                lock (m_csFile)
                {
                    if (m_vContext.Count > 0)
                    {
                        context = m_vContext[0];
                        trans = context.Transferring;
                        if (uploaded < 0 || errCode != 0 || errMsg.Length != 0)
                        {
                            context.ErrCode = errCode;
                            context.ErrMsg = errMsg;
                            CloseFile(context);
                        }
                        else if (!context.Sent)
                        {
                            //server acknowledged progress -- pump the next chunk
                            using (CScopeUQueue sb = new CScopeUQueue())
                            {
                                DAsyncResultHandler rh = null;
                                DOnExceptionFromServer se = null;
                                if (sb.UQueue.MaxBufferSize < STREAM_CHUNK_SIZE)
                                {
                                    sb.UQueue.Realloc(STREAM_CHUNK_SIZE);
                                }
                                byte[] buffer = sb.UQueue.IntenalBuffer;
                                try
                                {
                                    int ret = context.File.Read(buffer, 0, (int)STREAM_CHUNK_SIZE);
                                    if (ret > 0)
                                    {
                                        SendRequest(idUploading, buffer, (uint)ret, rh, context.Discarded, se);
                                    }
                                    if (ret < STREAM_CHUNK_SIZE)
                                    {
                                        context.Sent = true;
                                        SendRequest(idUploadCompleted, rh, context.Discarded, se);
                                    }
                                }
                                catch (System.IO.IOException err)
                                {
                                    context.ErrMsg = err.Message;
#if NO_HRESULT
                                    context.ErrCode = CANNOT_OPEN_LOCAL_FILE_FOR_READING;
#else
                                    context.ErrCode = err.HResult;
#endif
                                }
                            }
                        }
                    }
                }
                if (context != null && context.HasError)
                {
                    if (context.Upload != null)
                    {
                        context.Upload.Invoke(this, context.ErrCode, context.ErrMsg);
                    }
                    lock (m_csFile)
                    {
                        CloseFile(m_vContext.RemoveFromFront());
                    }
                    OnPostProcessing(0, 0);
                }
                else if (trans != null)
                {
                    trans.Invoke(this, uploaded);
                }
            }
            break;
        case idUploadCompleted:
            {
                DUpload upl = null;
                lock (m_csFile)
                {
                    if (m_vContext.Count > 0)
                    {
                        if (m_vContext[0].File != null)
                        {
                            upl = m_vContext[0].Upload;
                        }
                        else
                        {
                            //no local file handle -- the upload was replayed entirely from the client queue
                            m_vContext[0].QueueOk = false;
                            m_vContext[0].Sent = false;
                            CloseFile(m_vContext[0]);
                        }
                    }
                }
                if (upl != null)
                {
                    upl.Invoke(this, 0, "");
                }
                lock (m_csFile)
                {
                    if (m_vContext.Count > 0)
                    {
                        if (m_vContext[0].File != null)
                        {
                            CloseFile(m_vContext.RemoveFromFront());
                        }
                    }
                }
                OnPostProcessing(0, 0);
            }
            break;
        default:
            base.OnResultReturned(reqId, mc);
            break;
    }
}
/// <summary>
/// De-serialize a DataSet header (affected count, two flag bits, then the name, namespace
/// and prefix strings) and cache the new DataSet in m_ds.
/// </summary>
/// <param name="UQueue">Source buffer; an empty buffer yields null</param>
/// <returns>The newly created DataSet, or null when the buffer is empty</returns>
DataSet LoadDataSetHeader(CUQueue UQueue)
{
    if (UQueue.GetSize() == 0)
    {
        return null;
    }
    DataSet ds = new DataSet();
    m_ds = ds;
    UQueue.Load(out m_nAffected);
    byte flags;
    UQueue.Load(out flags);
    ds.CaseSensitive = ((flags & 2) == 2);
    ds.EnforceConstraints = ((flags & 4) == 4);
    string text;
    UQueue.Load(out text);
    ds.DataSetName = text;
    UQueue.Load(out text);
    ds.Namespace = text;
    UQueue.Load(out text);
    ds.Prefix = text;
    return ds;
}
/// <summary>
/// De-serialize one value of type T from the internal buffer.
/// </summary>
/// <param name="receiver">Receives the loaded value</param>
/// <returns>The internal CUQueue so that calls can be chained</returns>
public CUQueue Load<T>(out T receiver)
{
    m_UQueue.Load(out receiver);
    return m_UQueue;
}
/// <summary>
/// De-serialize an array of column references saved as (table name, ordinal) pairs,
/// resolving each one against CurrentDataSet.
/// </summary>
/// <param name="UQueue">Source buffer</param>
/// <param name="dcs">Receives the resolved columns</param>
/// <returns>The number of bytes consumed from the buffer</returns>
private uint PopTableColNamesOnly(CUQueue UQueue, ref DataColumn[] dcs)
{
    uint before = UQueue.GetSize();
    int total;
    UQueue.Load(out total);
    dcs = new DataColumn[total];
    for (int i = 0; i < total; ++i)
    {
        string table;
        UQueue.Load(out table);
        int ordinal;
        UQueue.Load(out ordinal);
        dcs[i] = CurrentDataSet.Tables[table].Columns[ordinal];
    }
    return (before - UQueue.GetSize());
}
/// <summary>
/// De-serialize an array of full DataColumn definitions. A serialized count of -1
/// represents a null array; otherwise the target array is reused when its size matches.
/// </summary>
/// <param name="UQueue">Source buffer</param>
/// <param name="dcs">Receives the de-serialized columns (or null)</param>
/// <returns>The number of bytes consumed from the buffer</returns>
private uint Pop(CUQueue UQueue, ref DataColumn[] dcs)
{
    uint before = UQueue.GetSize();
    int count;
    UQueue.Load(out count);
    if (count == -1)
    {
        dcs = null;
    }
    else
    {
        if (dcs == null || dcs.Length != count)
        {
            dcs = new DataColumn[count];
        }
        for (int i = 0; i < count; i++)
        {
            Pop(UQueue, ref dcs[i]);
        }
    }
    return (before - UQueue.GetSize());
}
/// <summary>
/// De-serialize a ForeignKeyConstraint: a null flag, then the child and parent column
/// arrays, the constraint name and three rule bytes -- in the exact order they were saved.
/// </summary>
/// <param name="UQueue">Source buffer</param>
/// <param name="fkc">Receives the rebuilt constraint, or null</param>
/// <returns>The number of bytes consumed from the buffer</returns>
private uint Pop(CUQueue UQueue, out ForeignKeyConstraint fkc)
{
    uint before = UQueue.GetSize();
    bool isNull;
    UQueue.Load(out isNull);
    if (isNull) //null
    {
        fkc = null;
    }
    else
    {
        DataColumn[] childCols = null;
        Pop(UQueue, ref childCols);
        DataColumn[] parentCols = null;
        Pop(UQueue, ref parentCols);
        fkc = new ForeignKeyConstraint(parentCols, childCols);
        string name;
        UQueue.Load(out name);
        fkc.ConstraintName = name;
        byte rule;
        UQueue.Load(out rule);
        fkc.AcceptRejectRule = (AcceptRejectRule)rule;
        UQueue.Load(out rule);
        fkc.UpdateRule = (Rule)rule;
        UQueue.Load(out rule);
        fkc.DeleteRule = (Rule)rule;
    }
    return (before - UQueue.GetSize());
}
/// <summary>
/// De-serialize a UniqueConstraint (null flag, column array, name, primary-key flag).
/// When the null flag is set, the caller's reference is left untouched.
/// </summary>
/// <param name="UQueue">Source buffer</param>
/// <param name="uc">Receives the rebuilt constraint when one was serialized</param>
/// <returns>The number of bytes consumed from the buffer</returns>
private uint Pop(CUQueue UQueue, ref UniqueConstraint uc)
{
    uint before = UQueue.GetSize();
    bool isNull;
    UQueue.Load(out isNull);
    if (!isNull)
    {
        DataColumn[] cols = null;
        Pop(UQueue, ref cols);
        string name;
        UQueue.Load(out name);
        bool primaryKey;
        UQueue.Load(out primaryKey);
        uc = new UniqueConstraint(name, cols, primaryKey);
    }
    return before - UQueue.GetSize();
}
/// <summary>
/// De-serialize a DataTable header: affected count, relation flag bits, table name, the
/// column collection (also cached as wire types in m_dts), display expression, minimum
/// capacity, namespace, prefix, the primary-key column ordinals, optional parent/child
/// relations and a trailing optional batch size.
/// </summary>
/// <param name="UQueue">Source buffer; an empty buffer yields null</param>
/// <returns>The newly created DataTable (also cached in m_dt), or null</returns>
private DataTable LoadDataTableHeader(CUQueue UQueue)
{
    int n;
    bool bNeedChildRelations;
    bool bNeedbParentRelations;
    if (UQueue.GetSize() == 0)
        return null;
    int nData = 0;
    byte bData = 0;
    string str = null;
    DataTable dt = new DataTable();
    m_dt = dt;
    m_bLoadingDataTable = true;
    UQueue.Load(out m_nAffected);
    UQueue.Load(out bData);
    bNeedChildRelations = ((bData & 2) == 2);
    bNeedbParentRelations = ((bData & 4) == 4);
    DataColumnCollection dcc = dt.Columns;
    UQueue.Load(out str);
    dt.TableName = str;
    Pop(UQueue, ref dcc);
    m_dts = new tagDataTypeSupported[dcc.Count];
    for (n = 0; n < dcc.Count; n++)
    {
        m_dts[n] = GetDT(dcc[n].DataType.FullName);
    }
    UQueue.Load(out str);
    dt.DisplayExpression = str;
    UQueue.Load(out nData);
    dt.MinimumCapacity = nData;
    UQueue.Load(out str);
    dt.Namespace = str;
    UQueue.Load(out str);
    dt.Prefix = str;
    //BUG FIX: the original reused nData as both the loop bound and the per-iteration column
    //ordinal, so any ordinal smaller than the running index terminated the loop early and
    //left null entries in the primary key. Keep the key count in its own variable.
    int nKeys;
    UQueue.Load(out nKeys);
    DataColumn[] pk = new DataColumn[nKeys];
    for (n = 0; n < nKeys; n++)
    {
        UQueue.Load(out nData); //column ordinal of the n-th key column
        pk[n] = dt.Columns[nData];
    }
    dt.PrimaryKey = pk;
    if (bNeedbParentRelations)
    {
        DataRelationCollection drc = dt.ParentRelations;
        Pop(UQueue, ref drc);
    }
    if (bNeedChildRelations)
    {
        DataRelationCollection drc = dt.ChildRelations;
        Pop(UQueue, ref drc);
    }
    if (UQueue.GetSize() >= 4)
    {
        UQueue.Load(out m_nBatchSize);
    }
    else
    {
        m_nBatchSize = 0;
    }
    return dt;
}
/// <summary>
/// Client-side handler for streaming file transfer results: download replies open/write the
/// local file of the front context in m_vContext, upload replies report progress or
/// completion. A finished context is removed from the front of the list, and Transfer()
/// is invoked at the end to start the next queued context, if any.
/// </summary>
/// <param name="reqId">Request id of the returned result</param>
/// <param name="mc">Buffer holding the result payload</param>
protected override void OnResultReturned(ushort reqId, CUQueue mc)
{
    switch (reqId)
    {
        case idDownload:
            {
                int res;
                string errMsg;
                mc.Load(out res).Load(out errMsg);
                DDownload dl;
                lock (m_csFile)
                {
                    CContext context = m_vContext[0];
                    if (context.File != null)
                    {
                        context.File.Close();
                        context.File = null;
                    }
                    else if (res == 0)
                    {
                        //server reported success but the local file never opened
                        res = CANNOT_OPEN_LOCAL_FILE_FOR_WRITING;
                        errMsg = context.ErrMsg;
                    }
                    dl = context.Download;
                }
                //invoke the user callback outside the lock
                if (dl != null)
                {
                    dl(this, res, errMsg);
                }
                lock (m_csFile)
                {
                    m_vContext.RemoveFromFront();
                }
            }
            break;
        case idStartDownloading:
            lock (m_csFile)
            {
                CContext context = m_vContext[0];
                mc.Load(out context.FileSize);
                try
                {
                    FileMode fm;
                    if ((context.Flags & FILE_OPEN_TRUNCACTED) == FILE_OPEN_TRUNCACTED)
                    {
                        fm = FileMode.Create;
                    }
                    else if ((context.Flags & FILE_OPEN_APPENDED) == FILE_OPEN_APPENDED)
                    {
                        fm = FileMode.Append;
                    }
                    else
                    {
                        fm = FileMode.OpenOrCreate;
                    }
                    FileShare fs = FileShare.None;
                    if ((context.Flags & FILE_OPEN_SHARE_WRITE) == FILE_OPEN_SHARE_WRITE)
                    {
                        fs = FileShare.Write;
                    }
                    context.File = new FileStream(context.LocalFile, fm, FileAccess.Write, fs);
                }
                catch (Exception err)
                {
                    //a null context.File signals the failure later in idDownload
                    context.ErrMsg = err.Message;
                }
                finally
                {
                }
            }
            break;
        case idDownloading:
            {
                long downloaded = -1;
                DTransferring trans = null;
                //BUG FIX: the original guarded this case with lock (m_cs) while every other
                //case protects m_vContext with m_csFile -- an inconsistent lock object that
                //left the context list unguarded here; use m_csFile like the rest.
                lock (m_csFile)
                {
                    CContext context = m_vContext[0];
                    trans = context.Transferring;
                    if (context.File != null)
                    {
                        byte[] buffer = mc.IntenalBuffer;
                        context.File.Write(buffer, 0, (int)mc.GetSize());
                        downloaded = context.File.Position;
                    }
                }
                mc.SetSize(0);
                if (trans != null)
                {
                    trans(this, downloaded);
                }
            }
            break;
        case idUpload:
            {
                bool removed = false;
                DUpload upl = null;
                int res;
                string errMsg;
                mc.Load(out res).Load(out errMsg);
                if (res != 0)
                {
                    //server refused the upload -- close the local file and drop the context
                    lock (m_csFile)
                    {
                        CContext context = m_vContext[0];
                        removed = true;
                        upl = context.Upload;
                        if (context.File != null)
                        {
                            context.File.Close();
                        }
                    }
                }
                if (upl != null)
                {
                    upl(this, res, errMsg);
                }
                if (removed)
                {
                    lock (m_csFile)
                    {
                        m_vContext.RemoveFromFront();
                    }
                }
            }
            break;
        case idUploading:
            {
                DTransferring trans = null;
                long uploaded;
                mc.Load(out uploaded);
                if (uploaded > 0)
                {
                    lock (m_csFile)
                    {
                        CContext context = m_vContext[0];
                        trans = context.Transferring;
                    }
                }
                if (trans != null)
                {
                    trans(this, uploaded);
                }
            }
            break;
        case idUploadCompleted:
            {
                DUpload upl = null;
                lock (m_csFile)
                {
                    CContext context = m_vContext[0];
                    upl = context.Upload;
                    if (context.File != null)
                    {
                        context.File.Close();
                        context.File = null;
                    }
                }
                if (upl != null)
                {
                    upl(this, 0, "");
                }
                lock (m_csFile)
                {
                    m_vContext.RemoveFromFront();
                }
            }
            break;
        default:
            base.OnResultReturned(reqId, mc);
            break;
    }
    //kick off the next queued transfer, if any
    lock (m_csFile)
    {
        Transfer();
    }
}
/// <summary>
/// De-serialize one record into aData: a null bitmap (one bit per column, set = DBNull)
/// followed by each non-null value in the column's declared tagDataTypeSupported type.
/// dtBytes is length-prefixed; unsigned and variant-like types travel as boxed objects.
/// </summary>
/// <param name="UQueue">Source buffer</param>
/// <param name="aData">Receives the record values; reallocated when the size mismatches</param>
/// <returns>The number of bytes consumed from the buffer</returns>
private uint PopDataRecord(CUQueue UQueue, ref object[] aData)
{
    int n;
    byte bData = 0;
    byte bOne = 1;
    if (m_dts == null)
        throw new InvalidOperationException("DataTable header is not de-serialized yet");
    uint nSize = UQueue.GetSize();
    int nLen = m_dts.Length;
    if (aData == null || aData.Length != nLen)
        aData = new object[nLen];
    //one bit per column marks a DBNull cell; bitmap is rounded up to whole bytes
    uint nBits = (uint)(m_dts.Length / 8 + (((m_dts.Length % 8) != 0) ? 1 : 0));
    byte[] aBit = new byte[nBits];
    UQueue.Pop(out aBit, nBits);
    for (n = 0; n < nLen; n++)
    {
        if ((n % 8) == 0)
            bData = aBit[n / 8];
        if ((bData & (bOne << (byte)(n % 8))) != 0)
        {
            aData[n] = DBNull.Value;
        }
        else
        {
            //non-null cell: read it using the column's declared wire type
            switch (m_dts[n])
            {
                case tagDataTypeSupported.dtBoolean: { bool myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtByte: { byte myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtChar: { char myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtDateTime: { DateTime myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtDecimal: { decimal myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtDouble: { double myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtFloat: { float myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtGuid: { Guid myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtInt16: { short myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtInt32: { int myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtInt64: { long myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtUDT:
                case tagDataTypeSupported.dtString: { string myData; UQueue.Load(out myData); aData[n] = myData; } break;
                case tagDataTypeSupported.dtBytes:
                    {
                        //byte arrays are serialized with an explicit length prefix
                        byte[] buffer;
                        uint nBytes;
                        UQueue.Load(out nBytes);
                        UQueue.Pop(out buffer, nBytes);
                        aData[n] = buffer;
                    }
                    break;
                case tagDataTypeSupported.dtUInt64:
                case tagDataTypeSupported.dtUInt32:
                case tagDataTypeSupported.dtUInt16:
                case tagDataTypeSupported.dtValue:
                case tagDataTypeSupported.dtValues:
                case tagDataTypeSupported.dtTimeSpan:
                    //unsigned and variant-like types are carried as boxed objects
                    UQueue.Load(out aData[n]);
                    break;
                default:
                    throw new InvalidOperationException("Unsupported data type for serialization");
            }
        }
    }
    return (nSize - UQueue.GetSize());
}
/// <summary>
/// De-serialize a column collection: a null flag, then a count-prefixed sequence of full
/// DataColumn definitions appended to the (cleared) target collection.
/// </summary>
/// <param name="UQueue">Source buffer</param>
/// <param name="Cols">Collection to rebuild; set to null when the null flag was serialized</param>
/// <returns>The number of bytes consumed from the buffer</returns>
private uint Pop(CUQueue UQueue, ref DataColumnCollection Cols)
{
    uint before = UQueue.GetSize();
    bool isNull;
    UQueue.Load(out isNull);
    if (isNull)
    {
        Cols = null;
    }
    else
    {
        Cols.Clear();
        int total;
        UQueue.Load(out total);
        for (int i = 0; i < total; i++)
        {
            DataColumn dc = null;
            Pop(UQueue, ref dc);
            Cols.Add(dc);
        }
    }
    return (before - UQueue.GetSize());
}
/// <summary>
/// De-serialize a data-reader style header into a new DataTable: field count, affected
/// count, per-column type codes, then per-column flag bits, column size and name. Columns
/// flagged cbIsKey are collected through m_qTemp into the table's primary key, and a
/// trailing 4-byte value, when present, carries the batch size.
/// </summary>
/// <param name="UQueue">Source buffer; an empty buffer yields null</param>
/// <returns>The newly created DataTable (also cached in m_dt), or null</returns>
private DataTable LoadDataReaderHeader(CUQueue UQueue)
{
    int n;
    int nData;
    short sData;
    string str;
    int nFieldCount;
    if (UQueue.GetSize() == 0)
        return null;
    m_dt = new DataTable();
    DataTable dt = m_dt;
    m_bLoadingDataTable = false;
    UQueue.Load(out nFieldCount);
    UQueue.Load(out m_nAffected);
    //first pass: the wire type code of every column
    m_dts = new tagDataTypeSupported[nFieldCount];
    for (n = 0; n < nFieldCount; n++)
    {
        UQueue.Load(out sData);
        m_dts[n] = (tagDataTypeSupported)sData;
    }
    m_qTemp.SetSize(0);
    //second pass: per-column flags, size and name
    for (n = 0; n < nFieldCount; n++)
    {
        UQueue.Load(out nData);
        DataColumn dc = new DataColumn();
        dc.DataType = GetType(m_dts[n]);
        dc.AllowDBNull = ((nData & (int)tagColumnBit.cbAllowDBNull) == (int)tagColumnBit.cbAllowDBNull);
        dc.AutoIncrement = ((nData & (int)tagColumnBit.cbIsAutoIncrement) == (int)tagColumnBit.cbIsAutoIncrement);
        dc.ReadOnly = ((nData & (int)tagColumnBit.cbIsReadOnly) == (int)tagColumnBit.cbIsReadOnly);
        dc.Unique = ((nData & (int)tagColumnBit.cbIsUnique) == (int)tagColumnBit.cbIsUnique);
        bool cbIsLong = ((nData & (int)tagColumnBit.cbIsLong) == (int)tagColumnBit.cbIsLong);
        if ((nData & (int)tagColumnBit.cbIsKey) == (int)tagColumnBit.cbIsKey)
        {
            //remember key ordinals in m_qTemp until all columns exist
            m_qTemp.Save(n);
        }
        UQueue.Load(out nData);
        if (nData > 0 && !cbIsLong && (m_dts[n] == tagDataTypeSupported.dtString || m_dts[n] == tagDataTypeSupported.dtChars))
        {
            dc.MaxLength = nData; //ColumnSize
        }
        UQueue.Load(out str);
        dc.ColumnName = str;
        dt.Columns.Add(dc);
    }
    if (m_qTemp.GetSize() > 0)
    {
        //each saved key ordinal occupies 4 bytes in m_qTemp
        int nIndex = 0;
        DataColumn[] dcs = new DataColumn[m_qTemp.GetSize() / 4];
        while (m_qTemp.GetSize() > 0)
        {
            m_qTemp.Load(out nData);
            DataColumn dc = dt.Columns[nData];
            dcs[nIndex] = dc;
            ++nIndex;
        }
        dt.PrimaryKey = dcs;
    }
    if (UQueue.GetSize() >= 4)
    {
        UQueue.Load(out m_nBatchSize);
    }
    else
    {
        m_nBatchSize = 0;
    }
    return dt;
}
/// <summary>
/// Assemble rowset results streamed from the server: rowset headers reset the BLOB scratch
/// buffer and notify the registered header callback; idTransferring/idEndRows accumulate
/// row values and hand a completed batch to the registered row callback;
/// idStartBLOB/idChunk/idEndBLOB reassemble a large object across packets, patching its
/// length prefix in place (unsafe) when the server could not know the final size up front.
/// </summary>
/// <param name="reqId">Request id of the returned result</param>
/// <param name="mc">Buffer holding the result payload</param>
protected override void OnResultReturned(ushort reqId, CUQueue mc)
{
    switch (reqId)
    {
        case DB_CONSTS.idRowsetHeader:
            {
                m_Blob.SetSize(0);
                if (m_Blob.MaxBufferSize > ONE_MEGA_BYTES)
                {
                    //shrink the scratch buffer back after a huge previous BLOB
                    m_Blob.Realloc(ONE_MEGA_BYTES);
                }
                CDBColumnInfoArray vColInfo;
                mc.Load(out vColInfo).Load(out m_indexRowset);
                KeyValuePair<DRowsetHeader, DRows> p = new KeyValuePair<DRowsetHeader, DRows>();
                lock (m_csCache)
                {
                    m_vData.Clear();
                    if (m_mapRowset.ContainsKey(m_indexRowset))
                    {
                        p = m_mapRowset[m_indexRowset];
                    }
                }
                if (p.Key != null)
                {
                    p.Key.Invoke(vColInfo);
                }
            }
            break;
        case DB_CONSTS.idBeginRows:
            m_Blob.SetSize(0);
            m_vData.Clear();
            break;
        case DB_CONSTS.idTransferring:
            while (mc.GetSize() > 0)
            {
                object vt;
                mc.Load(out vt);
                m_vData.Add(vt);
            }
            break;
        case DB_CONSTS.idEndRows:
            if (mc.GetSize() > 0 || m_vData.Count > 0)
            {
                object vt;
                while (mc.GetSize() > 0)
                {
                    mc.Load(out vt);
                    m_vData.Add(vt);
                }
                DRows row = null;
                lock (m_csCache)
                {
                    if (m_mapRowset.ContainsKey(m_indexRowset))
                    {
                        row = m_mapRowset[m_indexRowset].Value;
                    }
                }
                if (row != null)
                {
                    row.Invoke(m_vData);
                }
            }
            m_vData.Clear();
            break;
        case DB_CONSTS.idStartBLOB:
            {
                m_Blob.SetSize(0);
                uint len;
                mc.Load(out len);
                if (len != uint.MaxValue && len > m_Blob.MaxBufferSize)
                {
                    //pre-size the scratch buffer when the total BLOB length is known
                    m_Blob.Realloc(len);
                }
                m_Blob.Push(mc.IntenalBuffer, mc.HeadPosition, mc.GetSize());
                mc.SetSize(0);
            }
            break;
        case DB_CONSTS.idChunk:
            m_Blob.Push(mc.IntenalBuffer, mc.GetSize());
            mc.SetSize(0);
            break;
        case DB_CONSTS.idEndBLOB:
            if (mc.GetSize() > 0 || m_Blob.GetSize() > 0)
            {
                m_Blob.Push(mc.IntenalBuffer, mc.GetSize());
                mc.SetSize(0);
                unsafe
                {
                    fixed (byte* p = m_Blob.IntenalBuffer)
                    {
                        uint* len = (uint*)(p + m_Blob.HeadPosition + sizeof(ushort));
                        if (*len >= BLOB_LENGTH_NOT_AVAILABLE)
                        {
                            //length should be reset if BLOB length not available from server side at beginning
                            *len = (m_Blob.GetSize() - sizeof(ushort) - sizeof(uint));
                        }
                    }
                }
                object vt;
                m_Blob.Load(out vt);
                m_vData.Add(vt);
            }
            break;
        default:
            base.OnResultReturned(reqId, mc);
            break;
    }
}
/// <summary>
/// De-serialize one DataColumn: a null flag, a flag-bit byte (AllowDBNull/AutoIncrement/
/// ReadOnly/Unique), then seed, step, caption, mapping, name, data type code, default
/// value, expression, max length, namespace and prefix -- in the exact order written by
/// the serializing peer.
/// </summary>
/// <param name="UQueue">Source buffer</param>
/// <param name="dc">Column to fill; created on demand, or set to null when flagged null</param>
/// <returns>The number of bytes consumed from the buffer</returns>
private uint Pop(CUQueue UQueue, ref DataColumn dc)
{
    bool bNull;
    uint nLen = UQueue.GetSize();
    UQueue.Load(out bNull);
    if (bNull)
        dc = null;
    else
    {
        int nData;
        object ob;
        short sData;
        string str;
        long lData;
        byte bData;
        UQueue.Load(out bData);
        if (dc == null)
            dc = new DataColumn();
        dc.AllowDBNull = ((bData & (int)tagColumnBit.cbAllowDBNull) == (int)tagColumnBit.cbAllowDBNull);
        dc.AutoIncrement = ((bData & (int)tagColumnBit.cbIsAutoIncrement) == (int)tagColumnBit.cbIsAutoIncrement);
        dc.ReadOnly = ((bData & (int)tagColumnBit.cbIsReadOnly) == (int)tagColumnBit.cbIsReadOnly);
        dc.Unique = ((bData & (int)tagColumnBit.cbIsUnique) == (int)tagColumnBit.cbIsUnique);
        UQueue.Load(out lData);
        dc.AutoIncrementSeed = lData;
        UQueue.Load(out lData);
        dc.AutoIncrementStep = lData;
        UQueue.Load(out str);
        dc.Caption = str;
        UQueue.Load(out bData);
        dc.ColumnMapping = (MappingType)bData;
        UQueue.Load(out str);
        dc.ColumnName = str;
        UQueue.Load(out sData);
        dc.DataType = GetType((tagDataTypeSupported)sData);
        //legacy field no longer serialized; kept for reference:
        /* UQueue.Pop(out bData); dc.DateTimeMode = (DataSetDateTime)bData;*/
        UQueue.Load(out ob);
        dc.DefaultValue = ob;
        UQueue.Load(out str);
        dc.Expression = str;
        UQueue.Load(out nData);
        dc.MaxLength = nData;
        UQueue.Load(out str);
        dc.Namespace = str;
        UQueue.Load(out str);
        dc.Prefix = str;
    }
    return (nLen - UQueue.GetSize());
}
/// <summary>
/// Native-layer callback fired when a request arrives for a socket: locates the peer,
/// copies the request payload into the peer's buffer (directly from the core's buffer
/// when single-threaded), then pre-processes protocol-level requests -- idInterrupt for
/// non-HTTP services, or the HTTP user-request name/argument list -- before handing off
/// to sp.OnRArrive.
/// </summary>
/// <param name="hSocket">Native socket handle identifying the peer</param>
/// <param name="usRequestID">Id of the arriving request</param>
/// <param name="len">Payload size in bytes</param>
private void OnReqArrive(ulong hSocket, ushort usRequestID, uint len)
{
    CSocketPeer sp = Seek(hSocket);
    if (sp == null)
    {
        return;
    }
    sp.m_CurrReqID = usRequestID;
    CUQueue q = sp.m_qBuffer;
    q.SetSize(0);
    if (len > q.MaxBufferSize)
    {
        q.Realloc(len);
    }
    if (len > 0)
    {
        uint res;
        unsafe
        {
            fixed (byte* buffer = q.m_bytes)
            {
                if (m_nMainThreads <= 1)
                {
                    //single main thread: the core's request buffer can be copied directly
                    CUQueue.CopyMemory(buffer, (void*)ServerCoreLoader.GetRequestBuffer(hSocket), len);
                    res = len;
                }
                else
                {
                    res = ServerCoreLoader.RetrieveBuffer(hSocket, len, buffer, false);
                }
            }
        }
        System.Diagnostics.Debug.Assert(res == len);
        q.SetSize(res);
    }
    if (m_svsId != BaseServiceID.sidHTTP)
    {
        //align the buffer with the peer's byte order/OS before de-serializing
        q.OS = sp.m_os;
        q.Endian = sp.m_endian;
        if ((tagBaseRequestID)usRequestID == tagBaseRequestID.idInterrupt)
        {
            CClientPeer cp = (CClientPeer)sp;
            ulong options;
            q.Load(out options);
            cp.OnIntNotified(options);
            return;
        }
    }
    else
    {
        CHttpPeerBase hp = (CHttpPeerBase)sp;
        hp.m_WebRequestName = null;
        hp.m_vArg.Clear();
        if (usRequestID == (ushort)tagHttpRequestID.idUserRequest)
        {
            //HTTP user request: a request name followed by a count-prefixed argument list
            uint count;
            sbyte[] reqName;
            q.Load(out reqName);
            hp.m_WebRequestName = CUQueue.ToString(reqName);
            q.Load(out count);
            for (uint n = 0; n < count; ++n)
            {
                object arg;
                q.Load(out arg);
                hp.m_vArg.Add(arg);
            }
        }
    }
    sp.OnRArrive(usRequestID, len);
}
/// <summary>
/// De-serialize a relation collection: a count, then for each relation the child columns,
/// the nested flag, the relation name and the parent columns, rebuilt in place.
/// </summary>
/// <param name="UQueue">Source buffer</param>
/// <param name="drc">Collection to clear and refill</param>
/// <returns>The number of bytes consumed from the buffer</returns>
private uint Pop(CUQueue UQueue, ref DataRelationCollection drc)
{
    uint before = UQueue.GetSize();
    int total;
    UQueue.Load(out total);
    drc.Clear();
    for (int i = 0; i < total; i++)
    {
        DataColumn[] childCols = null;
        PopTableColNamesOnly(UQueue, ref childCols);
        bool nested;
        UQueue.Load(out nested);
        string name;
        UQueue.Load(out name);
        DataColumn[] parentCols = null;
        PopTableColNamesOnly(UQueue, ref parentCols);
        DataRelation relation = new DataRelation(name, parentCols, childCols);
        relation.Nested = nested;
        drc.Add(relation);
    }
    return (before - UQueue.GetSize());
}