// This function is only used for stored-procedure testing (speeds up testing).
// NOTE: keeps the original "DumyData" spelling because callers may reference it by name.
private LogCollection DumyData()
{
    LogCollection logTemplates = new LogCollection();
    // The three sample punches differ only in employee number, verify method and
    // unique id; the field-by-field initialization was triplicated before.
    logTemplates.Add(CreateDumyLog(206086, 102, "201902271001"));
    logTemplates.Add(CreateDumyLog(206016, 101, "201902271002"));
    logTemplates.Add(CreateDumyLog(205097, 102, "201902271003"));
    return logTemplates;
}

// Factory for one synthetic UserLog test record. All records share the machine
// number field and the fixed punch time 2019-02-27 10:15:30.
// VerifyMethod 101/102 were annotated "out" in the original -- TODO confirm semantics.
private UserLog CreateDumyLog(int empNumber, int verifyMethod, string uniqueId)
{
    UserLog log = new UserLog();
    log.ADate = DateTime.Now;
    log.EmpNumber = empNumber;
    log.MachineNO = dwMachineNumber;
    log.VerifyMethod = verifyMethod;
    log.UniqueID = uniqueId;
    log.PunchTime = new DateTime(2019, 2, 27, 10, 15, 30);
    return log;
}
/// <summary>
/// Adds a new entry to the LogCollection.
/// </summary>
/// <param name="pMessage">Message text of the new log entry.</param>
public void AddLog(string pMessage)
{
    var entry = new Log();
    entry.Message = pMessage;
    LogCollection.Add(entry);
}
// Logger event handler: marshals the message onto the UI thread before
// appending it to the dispatcher-bound LogCollection.
private void Logger_OnLog(object sender, string e)
{
    Action appendMessage = () => LogCollection.Add(e);
    Application.Current.Dispatcher.Invoke(appendMessage);
}
/// <summary>
/// Timer callback that polls Proxy.Monitor for log records exactly
/// _monitorIterations times, appending any non-null result to LogCollection.
/// Signals _monitorStart on the first iteration and _monitorTCS when all
/// iterations complete, so a test can await both milestones.
/// NOTE(review): async void is unavoidable for a timer-callback signature, but
/// exceptions thrown here are unobservable -- confirm that is acceptable.
/// </summary>
/// <param name="state">Timer state object (unused).</param>
private async void Monitor_Callback(object state)
{
    // Debug.WriteLine($"Monitor_Callback started. iterating {_monitorIterations} thread id = {Thread.CurrentThread.ManagedThreadId} ");
    List <LogHeader> logs;
    int count = 0;
    while (count < _monitorIterations)
    {
        if (count == 0)
        {
            // First iteration: tell the waiting test that monitoring has begun.
            _monitorStart.SetResult(null);
        }
        logs = await Proxy.Monitor(this.GameName, Players[0]);
        count++;
        if (logs != null)
        {
            // Debug.WriteLine($"Monitor returned with {logs.Count} records");
            LogCollection.Add(logs);
        }
        else
        {
            // Debug.WriteLine($"Monitor returned with Zero records!!");
        }
    }
    if (count == _monitorIterations)
    {
        // All iterations done: release whoever is awaiting completion.
        _monitorTCS.SetResult(null);
        // Debug.WriteLine("Exiting worker thread");
    }
}
/// <summary>
/// Handles an incoming log-data event: appends the record to LogCollection on
/// the UI thread and selects the newly added item. Null events or events
/// without a log payload are ignored.
/// </summary>
/// <param name="sendor">Event source (unused).</param>
/// <param name="args">Event data carrying the log record.</param>
public void ReceivedLogData(object sendor, LogDataReceivedEventArgs args)
{
    if (args == null || args.Log == null)
    {
        return;
    }
    // Marshal onto the dispatcher: the event is raised off the UI thread and
    // LogCollection/SelectedItem are bound to the view.
    // (Removed a large commented-out batching implementation that was dead code.)
    System.Windows.Application.Current.Dispatcher.Invoke(() =>
    {
        LogCollection.Add(args.Log);
        SelectedItem = LogCollection[LogCollection.Count - 1];
    });
}
/// <summary>
/// Engine log event handler: records the message as the most recent SIUS
/// message, appends it to LogCollection, and trims the collection so only the
/// newest entries are kept.
/// </summary>
private void EngineOnLog(object sender, LogEventArgs e)
{
    // Named constant replaces the former magic number 20.
    const int MaxLogEntries = 20;

    LastSiusMessage = e.Message;
    LogCollection.Add(e.Message);

    // Drop the oldest entries until the bound is respected.
    while (LogCollection.Count > MaxLogEntries)
    {
        LogCollection.RemoveAt(0);
    }
}
/// <summary>
/// Backs up a data area that has no intersection with any previously logged
/// backup segment: grows the transaction backup file to reserve room, records
/// a backup-log entry, then starts an asynchronous copy of the source bytes
/// into the reserved region. The actual write completes in ReadCallback.
/// </summary>
/// <param name="intersectingLogs">Unused; by contract this path is taken when no logs intersect.</param>
/// <param name="logKey">Identifies the source file/address of the area being backed up.</param>
/// <param name="area">Source address (Key) and byte count (Value) to back up.</param>
/// <param name="f">Open source file, or null to open by <paramref name="fFilename"/>.</param>
/// <param name="fFilename">Source filename used when <paramref name="f"/> is null.</param>
/// <param name="readPool">Pool supplying the async reader for the source data.</param>
/// <param name="writePool">Pool supplying the async writer for the backup file.</param>
/// <param name="key">Record key; its Address is repointed at the area start.</param>
private void BackupDataWithNoIntersection(
    IEnumerable <KeyValuePair <BackupDataLogKey, BackupDataLogValue> > intersectingLogs,
    BackupDataLogKey logKey,
    KeyValuePair <long, int> area,
    OnDisk.File.IFile f,
    string fFilename,
    ConcurrentIOPoolManager readPool,
    ConcurrentIOPoolManager writePool,
    RecordKey key)
{
    string systemBackupFilename = Server.Path + DataBackupFilename;
    int size = area.Value;
    key.Address = area.Key;

    // no intersection nor mergeable logs, add new log! backup and log the data area
    ConcurrentIOData reader = f != null
        ? readPool.GetInstance(f, size)
        : readPool.GetInstance(fFilename, null, size);
    ConcurrentIOData writer = writePool.GetInstance(systemBackupFilename, (TransactionRoot)Root);
    if (reader == null || writer == null)
    {
        throw new SopException("This program has a bug! 'didn't get reader or writer from Async IO Pool.");
    }
    LogTracer.Verbose("BackupDataWithNoIntersection: Start for Thread {0}.", Thread.CurrentThread.ManagedThreadId);

    var logValue = new BackupDataLogValue();
    logValue.DataSize = size;
    logValue.TransactionId = Id;
    logValue.BackupFileHandle = GetLogBackupFileHandle(DataBackupFilename);
    // return the current backup file size and grow it to make room for data to be backed up...
    logValue.BackupDataAddress = GrowBackupFile(size, writer.FileStream);

    // save a record of the backed up data.. (logged BEFORE the async copy starts)
    LogCollection.Add(logKey, logValue);

    // log after data was backed up!! (invoked by ReadCallback on completion)
    Sop.VoidFunc logBackedupData = () =>
    {
        UpdateLogger.LogLine("{0}{1}:{2} to {3}:{4} Size={5}",
            BackupFromToken, f != null ? f.Filename : fFilename, area.Key,
            DataBackupFilename, logValue.BackupDataAddress, size);
    };

    // Position writer at the reserved region and reader at the source area,
    // then kick off the async read; ReadCallback performs the matching write.
    writer.FileStream.Seek(logValue.BackupDataAddress, SeekOrigin.Begin, true);
    reader.FileStream.Seek(area.Key, SeekOrigin.Begin, true);
    reader.FileStream.BeginRead(
        reader.Buffer, 0, size, ReadCallback,
        new object[] { new[] { reader, writer }, true, logKey, logBackedupData });
}
/// <summary>
/// Reads every row produced by the "sp_tblLog_Select_SelectAll_linhnx" stored
/// procedure and returns them mapped into a new LogCollection.
/// </summary>
public static LogCollection SelectAll()
{
    var result = new LogCollection();
    using (IDataReader reader = SqlHelper.ExecuteReader(DAL.con(), CommandType.StoredProcedure, "sp_tblLog_Select_SelectAll_linhnx"))
    {
        while (reader.Read())
        {
            result.Add(getFromReader(reader));
        }
    }
    return result;
}
/// <summary>
/// Maps a data-layer DBLogCollection into a domain LogCollection by converting
/// each contained item; returns null when the input collection is null.
/// </summary>
private static LogCollection DBMapping(DBLogCollection dbCollection)
{
    if (dbCollection == null)
    {
        return null;
    }

    var mapped = new LogCollection();
    foreach (var dbEntry in dbCollection)
    {
        mapped.Add(DBMapping(dbEntry));
    }
    return mapped;
}
/// <summary>
/// Reloads the shared "LogCollection" application resource with the log
/// entries recorded on the date selected in the given DatePicker.
/// Does nothing when no date is selected; database errors are shown to the user.
/// </summary>
/// <param name="datePicker">Picker supplying the date filter.</param>
public void RefreshLogList(DatePicker datePicker)
{
    try
    {
        // BUGFIX: the original cast ((DateTime)datePicker.SelectedDate) threw
        // InvalidOperationException when nothing was selected, and its
        // "selectedDate != null" check was always true for the non-nullable
        // DateTime. Test HasValue before unwrapping instead.
        if (datePicker.SelectedDate.HasValue)
        {
            DateTime selectedDate = datePicker.SelectedDate.Value;
            List <Log> logList = DataController.GetInstance.GetLog(selectedDate);
            lc = Application.Current.Resources["LogCollection"] as LogCollection;
            lc.Clear();
            foreach (Log log in logList)
            {
                lc.Add(log);
            }
            Application.Current.Resources["LogCollection"] = lc;
        }
    }
    catch (MySqlException ex)
    {
        MessageBox.Show(ex.Message);
    }
}
/// <summary>
/// Filters the cached log list (lc) by a case-insensitive substring match over
/// part number, user, performed action, part description and job number, and
/// publishes the result as the "LogListCollection" application resource.
/// An empty term or the placeholder text "Search" restores the unfiltered list.
/// </summary>
/// <param name="searchTerm">Text to search for.</param>
public void SearchLogList(string searchTerm)
{
    if (searchTerm.Length == 0 || searchTerm.Equals("Search"))
    {
        llc = lc;
        Application.Current.Resources["LogListCollection"] = llc;
    }
    else
    {
        // PERF: hoisted out of the loop -- the original re-lower-cased the
        // search term once per field per log entry.
        string term = searchTerm.ToLower();
        LogCollection tempList = new LogCollection();
        foreach (Log log in lc)
        {
            if (log.Log_Part_Number.ToLower().Contains(term) ||
                log.Log_User.ToLower().Contains(term) ||
                log.Log_Performed_Action.ToLower().Contains(term) ||
                log.Log_Part_Description.ToLower().Contains(term) ||
                log.Log_Job_Number.ToLower().Contains(term))
            {
                tempList.Add(log);
            }
        }
        Application.Current.Resources["LogListCollection"] = tempList;
    }
}
/// <summary>
/// Appends the supplied log item to the LogCollection.
/// </summary>
/// <param name="logInformation">The log entry to record.</param>
public void Log(LogInfoItem logInformation) => LogCollection.Add(logInformation);
/// <summary>
/// Backs up a data area that has no intersection with previously logged backup
/// segments: appends room at the end of the transaction backup file, starts an
/// asynchronous copy of the source bytes, then records the new backup-log
/// entry. The copy completes in ReadCallback.
/// NOTE(review): unlike the sibling overload that throws SopException, a null
/// reader/writer here silently skips the backup -- confirm that is intentional.
/// </summary>
/// <param name="intersectingLogs">Unused; by contract this path is taken when no logs intersect.</param>
/// <param name="logKey">Identifies the source file/address of the area being backed up.</param>
/// <param name="area">Source address (Key) and byte count (Value) to back up.</param>
/// <param name="f">Open source file, or null to open by <paramref name="fFilename"/>.</param>
/// <param name="fFilename">Source filename used when <paramref name="f"/> is null.</param>
/// <param name="readPool">Pool supplying the async reader for the source data.</param>
/// <param name="writePool">Pool supplying the async writer for the backup file.</param>
/// <param name="key">Record key; its Address is repointed at the area start.</param>
private void BackupDataWithNoIntersection(
    IEnumerable <KeyValuePair <BackupDataLogKey, BackupDataLogValue> > intersectingLogs,
    BackupDataLogKey logKey,
    KeyValuePair <long, int> area,
    OnDisk.File.IFile f,
    string fFilename,
    ConcurrentIOPoolManager readPool,
    ConcurrentIOPoolManager writePool,
    RecordKey key)
{
    string systemBackupFilename = Server.Path + DataBackupFilename;
    int size = area.Value;
    key.Address = area.Key;
    //if (RegisterAdd(_addStore, null, null, key, size, false))
    //{
    //    Logger.LogLine("Extending, skipping Backup...");
    //    return;
    //}
    //** no intersection nor mergeable logs, add new log!
    //** backup and log the data area
    ConcurrentIOData reader = f != null
        ? readPool.GetInstance(f, size)
        : readPool.GetInstance(fFilename, null, size);
    ConcurrentIOData writer = writePool.GetInstance(systemBackupFilename, (TransactionRoot)Root, size);
    if (reader == null || writer == null)
    {
        // Pool exhausted: backup is silently skipped (see NOTE in summary).
        return;
    }
    var logValue = new BackupDataLogValue();
    logValue.DataSize = size;
    logValue.TransactionId = Id;
    //** todo: can we remove this block:
    //long readerFileSize = reader.FileStream.Length;
    //if (area.Key + size > readerFileSize)
    //{
    //    int appendSize = (int)(area.Key + size - readerFileSize);
    //    key.Address = readerFileSize;
    //    RegisterAdd(_addStore, null, null, key, appendSize, false);
    //    size = (int)(readerFileSize - area.Key);
    //    logValue.DataSize = size;
    //    reader.Buffer = new byte[size];
    //}
    //**
    // Position the reader at the start of the source area.
    reader.FileStream.Seek(area.Key, SeekOrigin.Begin);
    logValue.BackupFileHandle = GetLogBackupFileHandle(DataBackupFilename);
    // Backed-up data is appended at the current end of the backup file.
    logValue.BackupDataAddress = writer.FileStream.Seek(0, SeekOrigin.End);
    UpdateLogger.LogLine("{0}{1}:{2} to {3}:{4} Size={5}",
        BackupFromToken, f != null ? f.Filename : fFilename, area.Key,
        DataBackupFilename, logValue.BackupDataAddress, size);
    // resize target file to accomodate data to be copied.
    writer.FileStream.Seek(size, SeekOrigin.End);
    // Rewind the writer to where the copied bytes must land.
    writer.FileStream.Seek(logValue.BackupDataAddress, SeekOrigin.Begin);
    // Async copy: ReadCallback receives reader/writer and performs the write.
    reader.FileStream.BeginRead(
        reader.Buffer, 0, size, ReadCallback,
        new object[] { new[] { reader, writer }, true, logKey }
        );
    //** save a record of the backed up data..
    LogCollection.Add(logKey, logValue);
}
/// <summary>
/// Backs up the parts of a data area that fall OUTSIDE already-logged backup
/// segments. Each intersecting segment is subtracted from the requested region;
/// every remaining sub-area gets its own backup-log entry and an asynchronous
/// copy into the transaction backup file (completed in ReadCallback).
/// When <paramref name="intersectingLogs"/> is null the whole area is already
/// backed up and only transaction-conflict processing is done.
/// </summary>
/// <param name="intersectingLogs">Backup-log entries overlapping the area, or null if the area is fully covered.</param>
/// <param name="logKey">Identifies the source file/address of the requested area.</param>
/// <param name="area">Source address (Key) and byte count (Value) requested for backup.</param>
/// <param name="f">Open source file, or null to open by <paramref name="fFilename"/>.</param>
/// <param name="fFilename">Source filename used when <paramref name="f"/> is null.</param>
/// <param name="readPool">Pool supplying async readers for the source data.</param>
/// <param name="writePool">Pool supplying async writers for the backup file.</param>
/// <param name="key">Record key; its Address is repointed at each sub-area start.</param>
private void BackupDataWithIntersection(
    IEnumerable <KeyValuePair <BackupDataLogKey, BackupDataLogValue> > intersectingLogs,
    BackupDataLogKey logKey,
    KeyValuePair <long, int> area,
    OnDisk.File.IFile f,
    string fFilename,
    ConcurrentIOPoolManager readPool,
    ConcurrentIOPoolManager writePool,
    RecordKey key
    )
{
    if (intersectingLogs == null)
    {
        //** process conflicts with other trans...
        ProcessTransactionConflicts(logKey, area.Value);
        //** area is within an already backed up area (intersectingLogs == null), do nothing...
        return;
    }
    //** get area(s) outside each intersecting segment and back it up...
    var newRegion = new Region(area.Key, area.Value);
    bool wasIntersected = false;
    foreach (KeyValuePair <BackupDataLogKey, BackupDataLogValue> entry in intersectingLogs)
    {
        //** process conflicts with other trans...
        ProcessTransactionConflicts(entry.Key, entry.Value.DataSize);
        // Subtract returns true when the segment actually carved a piece out
        // of the region -- presumably; confirm against Region's contract.
        if (newRegion.Subtract(entry.Key.SourceDataAddress, entry.Value.DataSize))
        {
            wasIntersected = true;
        }
    }
    //** copy
    if (!wasIntersected)
    {
        return;
    }
    // Each remaining sub-area is backed up independently.
    foreach (KeyValuePair <long, int> newArea in newRegion)
    {
        var logKey2 = new BackupDataLogKey();
        logKey2.SourceFilename = logKey.SourceFilename;
        logKey2.SourceDataAddress = newArea.Key;
        var logValue = new BackupDataLogValue();
        logValue.DataSize = newArea.Value;
        logValue.TransactionId = Id;
        int newSize = newArea.Value;
        key.Address = newArea.Key;
        //if (RegisterAdd(_addStore, null, null, key, newArea.Value, false))
        //    return;
        logValue.BackupFileHandle = GetLogBackupFileHandle(DataBackupFilename);
        ConcurrentIOData reader = f != null
            ? readPool.GetInstance(f, newArea.Value)
            : readPool.GetInstance(fFilename, null, newArea.Value);
        if (reader == null)
        {
            throw new InvalidOperationException("Can't get ConcurrentIOData from ReadPool");
        }
        string systemBackupFilename = Server.Path + DataBackupFilename;
        ConcurrentIOData writer = writePool.GetInstance(systemBackupFilename, ((TransactionRoot)Root), newArea.Value);
        if (writer == null)
        {
            throw new InvalidOperationException("Can't get ConcurrentIOData from WritePool");
        }
        // Backed-up data is appended at the current end of the backup file.
        logValue.BackupDataAddress = writer.FileStream.Seek(0, SeekOrigin.End);
        //** todo: can we remove this block:
        //long readerFileSize = reader.FileStream.Length;
        //if (newArea.Key + newArea.Value > readerFileSize)
        //{
        //    int appendSize = (int)(newArea.Key + newArea.Value - readerFileSize);
        //    key.Address = readerFileSize;
        //    RegisterAdd(_addStore, null, null, key, appendSize, false);
        //    newSize = (int)(readerFileSize - newArea.Key);
        //    logValue.DataSize = newSize;
        //    reader.Buffer = new byte[newSize];
        //}
        //**
        // Position the reader at the start of this sub-area.
        reader.FileStream.Seek(newArea.Key, SeekOrigin.Begin);
        UpdateLogger.LogLine(
            "{0}{1}:{2} to {3}:{4} Size={5}",
            BackupFromToken, logKey2.SourceFilename, logKey2.SourceDataAddress,
            DataBackupFilename, logValue.BackupDataAddress, newSize);
        // resize target file to accomodate data to be copied.
        writer.FileStream.Seek(newSize, SeekOrigin.End);
        // Rewind the writer to where the copied bytes must land.
        writer.FileStream.Seek(logValue.BackupDataAddress, SeekOrigin.Begin);
        // Async copy: ReadCallback receives reader/writer and performs the write.
        reader.FileStream.BeginRead(
            reader.Buffer, 0, newSize, ReadCallback,
            new object[] { new[] { reader, writer }, true, logKey2 }
            );
        //** save a record of the backed up data..
        LogCollection.Add(logKey2, logValue);
    }
}
// Command handler: appends the current LogString to the end of LogCollection.
void ExecuteAddLast() => LogCollection.Add(LogString);
/// <summary>
/// Connects to the attendance device over TCP (port 4370), downloads the
/// general punch-log records, imports those punched after year 2020 via
/// SaveLog, and returns the outcome message as JSON.
/// </summary>
/// <returns>JSON-wrapped status/result message.</returns>
public JsonResult DownloadFromDevice()
{
    dt.Clear();
    dt.Columns.Add("attendanceDate");
    dt.Columns.Add("dwEnrollNumber");
    dt.Columns.Add("dwVerifyMode");
    dt.Columns.Add("dwInOutMode");

    message = string.Empty;
    objCZKEM = new CZKEM();
    bool isConnected = false;
    string dwEnrollNumber = "";
    int dwVerifyMode, dwInOutMode, dwYear, dwMonth, dwDay, dwHour, dwMinute, dwSecond, dwWorkcode = 0;

    try
    {
        if (!isConnected)
        {
            isConnected = objCZKEM.Connect_Net(ip, 4370);
        }
        if (isConnected)
        {
            if (objCZKEM.ReadGeneralLogData(dwMachineNumber))
            {
                LogCollection logTemplates = new LogCollection();
                while (objCZKEM.SSR_GetGeneralLogData(dwMachineNumber, out dwEnrollNumber, out dwVerifyMode,
                    out dwInOutMode, out dwYear, out dwMonth, out dwDay, out dwHour, out dwMinute,
                    out dwSecond, ref dwWorkcode))
                {
                    // Only records punched after 2020 are imported.
                    if (dwYear > 2020)
                    {
                        int empNumber = Convert.ToInt32(dwEnrollNumber);
                        // UniqueID = employee number + timestamp components,
                        // presumably used to de-duplicate downloads -- TODO confirm.
                        StringBuilder sb = new StringBuilder();
                        sb.Append(empNumber);
                        sb.Append(dwYear);
                        sb.Append(dwMonth);
                        sb.Append(dwDay);
                        sb.Append(dwHour);
                        sb.Append(dwMinute);
                        sb.Append(dwSecond);

                        DateTime punchTime = new DateTime(dwYear, dwMonth, dwDay, dwHour, dwMinute, dwSecond);
                        UserLog log = new UserLog();
                        log.ADate = punchTime;
                        log.EmpNumber = empNumber;
                        log.MachineNO = dwMachineNumber;
                        log.VerifyMethod = dwVerifyMode;
                        log.UniqueID = sb.ToString();
                        log.PunchTime = punchTime;
                        logTemplates.Add(log);
                    }
                }
                // BUGFIX: the original overwrote this result with an always-empty
                // local ("message = message2;"), discarding SaveLog's status.
                // Also removed the unused locals (DataTable newDt, totalCount,
                // minute, second, attendanceDate).
                message = SaveLog(logTemplates);
            }
            else
            {
                message = "No Log Found....";
            }
        }
        else
        {
            message = "Device Not Connected";
        }
    }
    catch (Exception ex)
    {
        message = ex.Message;
    }
    return Json(message, JsonRequestBehavior.AllowGet);
}
/// <summary>
/// Backs up the parts of a data area that fall OUTSIDE already-logged backup
/// segments. The requested area is wrapped in a Region; for each remaining
/// sub-area a backup-log entry is recorded and an asynchronous copy into the
/// transaction backup file is started (completed in ReadCallback).
/// When <paramref name="intersectingLogs"/> is null the area is already fully
/// backed up and the method returns immediately.
/// NOTE(review): in this variant the Region is never reduced by the
/// intersecting segments (the subtraction loop is commented out in the
/// "for future implements" region) -- confirm that re-backing-up the full
/// area is intended.
/// </summary>
/// <param name="intersectingLogs">Backup-log entries overlapping the area, or null if fully covered.</param>
/// <param name="logKey">Identifies the source file/address of the requested area.</param>
/// <param name="area">Source address (Key) and byte count (Value) requested for backup.</param>
/// <param name="f">Open source file, or null to open by <paramref name="fFilename"/>.</param>
/// <param name="fFilename">Source filename used when <paramref name="f"/> is null.</param>
/// <param name="readPool">Pool supplying async readers for the source data.</param>
/// <param name="writePool">Pool supplying async writers for the backup file.</param>
/// <param name="key">Record key; its Address is repointed at each sub-area start.</param>
private void BackupDataWithIntersection(
    IEnumerable <KeyValuePair <BackupDataLogKey, BackupDataLogValue> > intersectingLogs,
    BackupDataLogKey logKey,
    KeyValuePair <long, int> area,
    OnDisk.File.IFile f,
    string fFilename,
    ConcurrentIOPoolManager readPool,
    ConcurrentIOPoolManager writePool,
    RecordKey key
    )
{
    if (intersectingLogs == null)
    {
        // process conflicts with other trans...
        //ProcessTransactionConflicts(logKey, area.Value);
        // area is within an already backed up area (intersectingLogs == null), do nothing...
        return;
    }
    LogTracer.Verbose("BackupDataWithIntersection: Start for Thread {0}.", Thread.CurrentThread.ManagedThreadId);
    // get area(s) outside each intersecting segment and back it up...
    var newRegion = new Region(area.Key, area.Value);

    #region for future implements... ?
    //bool wasIntersected = false;
    //foreach (KeyValuePair<BackupDataLogKey, BackupDataLogValue> entry in intersectingLogs)
    //{
    //    // process conflicts with other trans...
    //    ProcessTransactionConflicts(entry.Key, entry.Value.DataSize);
    //    if (newRegion.Subtract(entry.Key.SourceDataAddress, entry.Value.DataSize))
    //        wasIntersected = true;
    //}
    //if (!wasIntersected) return;
    #endregion

    // copy modified blocks to the transaction backup file.
    foreach (KeyValuePair <long, int> newArea in newRegion)
    {
        // Surface any failure from a previous async IO before issuing more work.
        if (readPool.AsyncThreadException != null)
        {
            throw readPool.AsyncThreadException;
        }
        if (writePool.AsyncThreadException != null)
        {
            throw writePool.AsyncThreadException;
        }
        var logKey2 = new BackupDataLogKey();
        logKey2.SourceFilename = logKey.SourceFilename;
        logKey2.SourceDataAddress = newArea.Key;
        var logValue = new BackupDataLogValue();
        logValue.DataSize = newArea.Value;
        logValue.TransactionId = Id;
        int newSize = newArea.Value;
        key.Address = newArea.Key;
        //if (RegisterAdd(_addBlocksStore, null, null, key, newArea.Value, false))
        //    return;
        logValue.BackupFileHandle = GetLogBackupFileHandle(DataBackupFilename);
        ConcurrentIOData reader = f != null
            ? readPool.GetInstance(f, newArea.Value)
            : readPool.GetInstance(fFilename, null, newArea.Value);
        if (reader == null)
        {
            throw new InvalidOperationException("Can't get ConcurrentIOData from ReadPool");
        }
        string systemBackupFilename = Server.Path + DataBackupFilename;
        ConcurrentIOData writer = writePool.GetInstance(systemBackupFilename, ((TransactionRoot)Root));
        if (writer == null)
        {
            throw new InvalidOperationException("Can't get ConcurrentIOData from WritePool");
        }
        // return the current backup file size and grow it to make room for data to be backed up...
        logValue.BackupDataAddress = GrowBackupFile(newSize, writer.FileStream);
        // save a record of the backed up data.. (logged BEFORE the async copy starts)
        LogCollection.Add(logKey2, logValue);
        // prepare lambda expression to log after data was backed up!! (invoked by ReadCallback)
        Sop.VoidFunc logBackedupData = () =>
        {
            UpdateLogger.LogLine(
                "{0}{1}:{2} to {3}:{4} Size={5}",
                BackupFromToken, logKey2.SourceFilename, logKey2.SourceDataAddress,
                DataBackupFilename, logValue.BackupDataAddress, newSize);
        };
        // Position writer at the reserved region and reader at the sub-area,
        // then kick off the async read; ReadCallback performs the matching write.
        writer.FileStream.Seek(logValue.BackupDataAddress, SeekOrigin.Begin, true);
        reader.FileStream.Seek(newArea.Key, SeekOrigin.Begin, true);
        reader.FileStream.BeginRead(
            reader.Buffer, 0, newSize, ReadCallback,
            new object[] { new[] { reader, writer }, true, logKey2, logBackedupData });
    }
}