/// <summary>
/// Writes the specified <paramref name="items"/> to the appropriate historic archive file(s), opening and
/// closing each historic <see cref="ArchiveFile"/> as the data transitions between file time spans.
/// </summary>
/// <param name="items"><see cref="ArchiveData"/> points to be archived historically.</param>
private void WriteToHistoricArchiveFile(ArchiveData[] items)
{
    if (m_buildHistoricFileListThread.IsAlive)
        // Wait until the historic file list has been built.
        m_buildHistoricFileListThread.Join();

    OnHistoricDataWriteStart();

    // First we'll separate all point data by ID.
    Dictionary<int, List<ArchiveData>> sortedPointData = new Dictionary<int, List<ArchiveData>>();
    for (int i = 0; i < items.Length; i++)
    {
        // Single lookup via TryGetValue instead of ContainsKey + Add + indexer.
        List<ArchiveData> dataForPoint;
        if (!sortedPointData.TryGetValue(items[i].HistorianID, out dataForPoint))
        {
            dataForPoint = new List<ArchiveData>();
            sortedPointData.Add(items[i].HistorianID, dataForPoint);
        }
        dataForPoint.Add(items[i]);
    }

    ProcessProgress<int> historicWriteProgress = new ProcessProgress<int>("HistoricWrite");
    historicWriteProgress.Total = items.Length;
    foreach (int pointID in sortedPointData.Keys)
    {
        // We'll sort the point data for the current point ID by time.
        // Hoist the list reference once instead of re-indexing the dictionary on every access.
        List<ArchiveData> pointData = sortedPointData[pointID];
        pointData.Sort();

        ArchiveFile historicFile = null;
        ArchiveDataBlock historicFileBlock = null;
        try
        {
            for (int i = 0; i < pointData.Count; i++)
            {
                if (historicFile == null)
                {
                    // We'll try to find a historic file where the current point data belongs.
                    Info historicFileInfo;
                    m_writeSearchTimeTag = pointData[i].Time;
                    lock (m_historicArchiveFiles)
                    {
                        historicFileInfo = m_historicArchiveFiles.Find(FindHistoricArchiveFileForWrite);
                    }

                    if (historicFileInfo != null)
                    {
                        // Found a historic file where the data can be written.
                        historicFile = new ArchiveFile();
                        historicFile.FileName = historicFileInfo.FileName;
                        historicFile.StateFile = m_stateFile;
                        historicFile.IntercomFile = m_intercomFile;
                        historicFile.MetadataFile = m_metadataFile;
                        historicFile.Open();
                    }
                }

                if (historicFile != null)
                {
                    if (pointData[i].Time.CompareTo(historicFile.Fat.FileStartTime) >= 0 && pointData[i].Time.CompareTo(historicFile.Fat.FileEndTime) <= 0)
                    {
                        // The current point data belongs to the current historic archive file.
                        if (historicFileBlock == null || historicFileBlock.SlotsAvailable == 0)
                        {
                            // Request a new or previously used data block for point data.
                            historicFileBlock = historicFile.Fat.RequestDataBlock(pointID, pointData[i].Time, -1);
                        }

                        historicFileBlock.Write(pointData[i]);
                        historicFile.Fat.DataPointsReceived++;
                        historicFile.Fat.DataPointsArchived++;

                        // Use the List<T>.Count property, not the LINQ Count() extension.
                        if (i == pointData.Count - 1)
                        {
                            // Last piece of data for the point, so we close the currently open file.
                            historicFile.Save();
                            historicFile.Dispose();
                            historicFile = null;
                            historicFileBlock = null;
                        }

                        historicWriteProgress.Complete++;
                    }
                    else
                    {
                        // The current point data doesn't belong to the current historic archive file, so we have
                        // to write all the point data we have so far for the current historic archive file to it.
                        i--; // Back up so this point is re-processed against the next matching historic file.
                        historicFile.Dispose();
                        historicFile = null;
                        historicFileBlock = null;
                    }
                }
            }

            // Notify of progress per point.
            historicWriteProgress.ProgressMessage = string.Format("Wrote historic data for point id {0} ({1} of {2}).", pointID, "{0}", "{1}");
            OnHistoricDataWriteProgress(historicWriteProgress);
        }
        catch (Exception ex)
        {
            // Free-up used memory.
            if (historicFile != null)
            {
                try
                {
                    historicFile.Dispose();
                    historicFile = null;
                }
                catch
                {
                    // Best-effort cleanup; the original exception reported below is the meaningful one.
                }
            }

            // Notify of the exception.
            OnHistoricDataWriteException(ex);
        }
    }

    OnHistoricDataWriteComplete();
}
/// <summary>
/// Intended to insert out-of-sequence <paramref name="items"/> into the current archive file.
/// Currently a no-op placeholder — the items are silently discarded until this is implemented.
/// </summary>
/// <param name="items">Out-of-sequence <see cref="ArchiveData"/> points to be archived.</param>
private void InsertInCurrentArchiveFile(ArchiveData[] items) { // TODO: Implement archival of out-of-sequence data. }
/// <summary>
/// Raises the <see cref="HistoricDataReceived"/> event for the given data point.
/// </summary>
/// <param name="dataPoint"><see cref="ArchiveData"/> to send to <see cref="HistoricDataReceived"/> event.</param>
protected virtual void OnHistoricDataReceived(ArchiveData dataPoint)
{
    // Nothing to do when no one has subscribed.
    if (HistoricDataReceived == null)
        return;

    HistoricDataReceived(this, new EventArgs<ArchiveData>(dataPoint));
}
/// <summary>
/// Raises the <see cref="OutOfSequenceDataReceived"/> event for the given data point.
/// </summary>
/// <param name="dataPoint"><see cref="ArchiveData"/> to send to <see cref="OutOfSequenceDataReceived"/> event.</param>
protected virtual void OnOutOfSequenceDataReceived(ArchiveData dataPoint)
{
    // Nothing to do when no one has subscribed.
    if (OutOfSequenceDataReceived == null)
        return;

    OutOfSequenceDataReceived(this, new EventArgs<ArchiveData>(dataPoint));
}
/// <summary>
/// Raises the <see cref="FutureDataReceived"/> event for the given data point.
/// </summary>
/// <param name="dataPoint"><see cref="ArchiveData"/> to send to <see cref="FutureDataReceived"/> event.</param>
protected virtual void OnFutureDataReceived(ArchiveData dataPoint)
{
    // Nothing to do when no one has subscribed.
    if (FutureDataReceived == null)
        return;

    FutureDataReceived(this, new EventArgs<ArchiveData>(dataPoint));
}
/// <summary>
/// Raises the <see cref="OrphanDataReceived"/> event for the given data point.
/// </summary>
/// <param name="dataPoint"><see cref="ArchiveData"/> to send to <see cref="OrphanDataReceived"/> event.</param>
protected virtual void OnOrphanDataReceived(ArchiveData dataPoint)
{
    // Nothing to do when no one has subscribed.
    if (OrphanDataReceived == null)
        return;

    OrphanDataReceived(this, new EventArgs<ArchiveData>(dataPoint));
}
/// <summary>
/// Writes the specified <paramref name="dataPoint"/> to the <see cref="ArchiveFile"/>.
/// </summary>
/// <param name="dataPoint"><see cref="ArchiveData"/> to be written.</param>
/// <exception cref="InvalidOperationException">The file is not open, or is not an Active file.</exception>
public void WriteData(IDataPoint dataPoint)
{
    // Ensure that the current file is open.
    if (!IsOpen)
        throw new InvalidOperationException(string.Format("\"{0}\" file is not open.", m_fileName));

    // Ensure that the current file is active.
    if (m_fileType != ArchiveFileType.Active)
        throw new InvalidOperationException("Data can only be directly written to files that are Active.");

    // Yield to the rollover process if it is in progress.
    m_rolloverWaitHandle.WaitOne();

    // Initialize local variables.
    ArchiveData data = (ArchiveData)dataPoint;
    MetadataRecord metadata = m_metadataFile.Read(data.HistorianID);
    StateRecord state = m_stateFile.Read(data.HistorianID);
    IntercomRecord system = m_intercomFile.Read(1);

    // Ensure that the received data is to be archived.
    if (metadata == null || !metadata.GeneralFlags.Enabled)
    {
        OnOrphanDataReceived(data);
        return;
    }

    // Ensure that data is not far out in to the future.
    if (data.Time > DateTime.UtcNow.AddMinutes(m_leadTimeTolerance))
    {
        OnFutureDataReceived(data);
        return;
    }

    // Perform quality check if data quality is not set.
    if ((int)data.Quality == 31)
    {
        // Note: Here we're checking if the Quality is 31 instead of -1 because the quality value is stored
        // in the first 5 bits (QualityMask = 31) of Flags in the point data. Initially when the Quality is
        // set to -1, all the bits of Flags (a 32-bit integer) are set to 1. And therefore, when we get the
        // Quality, which is a masked value of Flags, we get 31 and not -1.
        switch (metadata.GeneralFlags.DataType)
        {
            case DataType.Analog:
                if (data.Value >= metadata.AnalogFields.HighRange)
                    data.Quality = Quality.UnreasonableHigh;
                else if (data.Value >= metadata.AnalogFields.HighAlarm)
                    data.Quality = Quality.ValueAboveHiHiAlarm;
                else if (data.Value >= metadata.AnalogFields.HighWarning)
                    data.Quality = Quality.ValueAboveHiAlarm;
                else if (data.Value <= metadata.AnalogFields.LowRange)
                    data.Quality = Quality.UnreasonableLow;
                else if (data.Value <= metadata.AnalogFields.LowAlarm)
                    data.Quality = Quality.ValueBelowLoLoAlarm;
                else if (data.Value <= metadata.AnalogFields.LowWarning)
                    data.Quality = Quality.ValueBelowLoAlarm;
                else
                    data.Quality = Quality.Good;
                break;
            case DataType.Digital:
                if (data.Value == metadata.DigitalFields.AlarmState)
                    data.Quality = Quality.LogicalAlarm;
                else
                    data.Quality = Quality.Good;
                break;
        }
    }

    // Update information about the latest data point received.
    if (data.Time > system.LatestDataTime)
    {
        system.LatestDataID = data.HistorianID;
        system.LatestDataTime = data.Time;
        m_intercomFile.Write(1, system);
    }

    // Check for data that is out-of-sequence based on its time.
    if (data.Time <= state.PreviousData.Time)
    {
        if (data.Time == state.PreviousData.Time)
        {
            // Discard data that is an exact duplicate of data in line for archival.
            if (data.Value == state.PreviousData.Value && data.Quality == state.PreviousData.Quality)
                return;
        }
        else
        {
            // Queue out-of-sequence data for processing if it is not to be discarded.
            if (!m_discardOutOfSequenceData)
                m_outOfSequenceDataQueue.Add(data);

            OnOutOfSequenceDataReceived(data);
            return;
        }
    }

    // [BEGIN]   Data compression
    bool archiveData = false;
    bool calculateSlopes = false;
    float compressionLimit = metadata.AnalogFields.CompressionLimit;

    // Set the compression limit to a very low number for digital points.
    if (metadata.GeneralFlags.DataType == DataType.Digital)
        compressionLimit = 0.000000001f;

    state.CurrentData = new StateRecordData(data);
    if (state.ArchivedData.IsEmpty)
    {
        // This is the first time data is received.
        state.CurrentData = new StateRecordData(-1);
        archiveData = true;
    }
    else if (state.PreviousData.IsEmpty)
    {
        // This is the second time data is received.
        calculateSlopes = true;
    }
    else
    {
        // Process quality-based alarming if enabled.
        if (metadata.GeneralFlags.AlarmEnabled)
        {
            // BUG FIX: AlarmFlags is a bit field indexed by quality value, so the mask for the current
            // quality is (1 << quality). The previous code used (2 ^ quality), but '^' is bitwise XOR
            // in C# (not exponentiation), which tested the wrong bits.
            if ((metadata.AlarmFlags.Value & (1 << (int)state.CurrentData.Quality)) != 0)
            {
                // Current data quality warrants alarming based on the alarming settings.
                float delay = 0;
                switch (metadata.GeneralFlags.DataType)
                {
                    case DataType.Analog:
                        delay = metadata.AnalogFields.AlarmDelay;
                        break;
                    case DataType.Digital:
                        delay = metadata.DigitalFields.AlarmDelay;
                        break;
                }

                // Dispatch the alarm immediately or after a given time based on settings.
                if (delay > 0)
                {
                    // Wait before dispatching alarm.
                    double first;
                    if (m_delayedAlarmProcessing.TryGetValue(data.HistorianID, out first))
                    {
                        if (state.CurrentData.Time.Value - first > delay)
                        {
                            // Wait is now over, dispatch the alarm.
                            m_delayedAlarmProcessing.Remove(data.HistorianID);
                            OnProcessAlarmNotification(state);
                        }
                    }
                    else
                    {
                        // Keyed consistently by data.HistorianID (same record ID used for lookup/removal above).
                        m_delayedAlarmProcessing.Add(data.HistorianID, state.CurrentData.Time.Value);
                    }
                }
                else
                {
                    // Dispatch the alarm immediately.
                    OnProcessAlarmNotification(state);
                }
            }
            else
            {
                m_delayedAlarmProcessing.Remove(data.HistorianID);
            }
        }

        if (m_compressData)
        {
            // Data is to be compressed.
            if (metadata.CompressionMinTime > 0 && state.CurrentData.Time.Value - state.ArchivedData.Time.Value < metadata.CompressionMinTime)
            {
                // CompressionMinTime is in effect.
                archiveData = false;
                calculateSlopes = false;
            }
            else if (state.CurrentData.Quality != state.ArchivedData.Quality || state.CurrentData.Quality != state.PreviousData.Quality || (metadata.CompressionMaxTime > 0 && state.PreviousData.Time.Value - state.ArchivedData.Time.Value > metadata.CompressionMaxTime))
            {
                // Quality changed or CompressionMaxTime is exceeded.
                data = new ArchiveData(state.PreviousData);
                archiveData = true;
                calculateSlopes = true;
            }
            else
            {
                // Perform a compression test (swinging-door style slope comparison).
                double slope1;
                double slope2;
                double currentSlope;

                slope1 = (state.CurrentData.Value - (state.ArchivedData.Value + compressionLimit)) / (state.CurrentData.Time.Value - state.ArchivedData.Time.Value);
                slope2 = (state.CurrentData.Value - (state.ArchivedData.Value - compressionLimit)) / (state.CurrentData.Time.Value - state.ArchivedData.Time.Value);
                currentSlope = (state.CurrentData.Value - state.ArchivedData.Value) / (state.CurrentData.Time.Value - state.ArchivedData.Time.Value);

                if (slope1 >= state.Slope1)
                    state.Slope1 = slope1;

                if (slope2 <= state.Slope2)
                    state.Slope2 = slope2;

                if (currentSlope <= state.Slope1 || currentSlope >= state.Slope2)
                {
                    data = new ArchiveData(state.PreviousData);
                    archiveData = true;
                    calculateSlopes = true;
                }
            }
        }
        else
        {
            // Data is not to be compressed.
            data = new ArchiveData(state.PreviousData);
            archiveData = true;
        }
    }
    // [END]     Data compression

    // [BEGIN]   Data archival
    m_fat.DataPointsReceived++;
    if (archiveData)
    {
        if (data.Time >= m_fat.FileStartTime)
        {
            // Data belongs to this file.
            ArchiveDataBlock dataBlock;
            lock (m_dataBlocks)
            {
                dataBlock = m_dataBlocks[data.HistorianID - 1];
            }

            if (dataBlock == null || dataBlock.SlotsAvailable == 0)
            {
                // Need to find a data block for writing the data.
                if (dataBlock != null)
                {
                    dataBlock = null;
                    state.ActiveDataBlockIndex = -1;
                }

                if (state.ActiveDataBlockIndex >= 0)
                {
                    // Retrieve previously used data block.
                    dataBlock = m_fat.RequestDataBlock(data.HistorianID, data.Time, state.ActiveDataBlockIndex);
                }
                else
                {
                    // Time to request a brand new data block.
                    dataBlock = m_fat.RequestDataBlock(data.HistorianID, data.Time, system.DataBlocksUsed);
                }

                if (dataBlock != null)
                {
                    // Update the total number of data blocks used.
                    if (dataBlock.SlotsUsed == 0 && system.DataBlocksUsed == dataBlock.Index)
                    {
                        system.DataBlocksUsed++;
                        m_intercomFile.Write(1, system);
                    }

                    // Update the active data block index information.
                    state.ActiveDataBlockIndex = dataBlock.Index;
                }

                // Keep in-memory reference to the data block for consecutive writes.
                lock (m_dataBlocks)
                {
                    m_dataBlocks[data.HistorianID - 1] = dataBlock;
                }

                // Kick-off the rollover preparation when its threshold is reached.
                if (Statistics.FileUsage >= m_rolloverPreparationThreshold && !File.Exists(StandbyArchiveFileName) && !m_rolloverPreparationThread.IsAlive)
                {
                    m_rolloverPreparationThread = new Thread(new ThreadStart(PrepareForRollover));
                    m_rolloverPreparationThread.Priority = ThreadPriority.Lowest;
                    m_rolloverPreparationThread.Start();
                }
            }

            if (dataBlock != null)
            {
                // Write data to the data block.
                dataBlock.Write(data);
                m_fat.DataPointsArchived++;
            }
            else
            {
                // File is full, rollover if configured.
                OnFileFull();
                if (m_rolloverOnFull)
                {
                    while (true)
                    {
                        Rollover(); // Start rollover.
                        if (m_rolloverWaitHandle.WaitOne(1, false))
                        {
                            break; // Rollover is successful.
                        }
                    }
                }

                // Re-read the state information since it is modified during the rollover.
                state = m_stateFile.Read(data.HistorianID);
            }
        }
        else
        {
            // Data is historic.
            m_fat.DataPointsReceived--;
            m_historicDataQueue.Add(data);
            OnHistoricDataReceived(data);
        }

        state.ArchivedData = new StateRecordData(data);
    }

    if (calculateSlopes)
    {
        if (state.CurrentData.Time.Value != state.ArchivedData.Time.Value)
        {
            state.Slope1 = (state.CurrentData.Value - (state.ArchivedData.Value + compressionLimit)) / (state.CurrentData.Time.Value - state.ArchivedData.Time.Value);
            state.Slope2 = (state.CurrentData.Value - (state.ArchivedData.Value - compressionLimit)) / (state.CurrentData.Time.Value - state.ArchivedData.Time.Value);
        }
        else
        {
            state.Slope1 = 0;
            state.Slope2 = 0;
        }
    }

    state.PreviousData = state.CurrentData;

    // Write state information to the file.
    m_stateFile.Write(state.HistorianID, state);
    // [END]     Data archival
}
/// <summary>
/// Writes the <paramref name="dataPoint"/> to the <see cref="ArchiveDataBlock"/>.
/// </summary>
/// <param name="dataPoint"><see cref="ArchiveData"/> point to write.</param>
/// <exception cref="InvalidOperationException">The data block has no free slots left.</exception>
public void Write(ArchiveData dataPoint)
{
    // Guard: refuse the write when the block is already full.
    if (SlotsAvailable <= 0)
        throw new InvalidOperationException("No slots available for writing new data.");

    m_lastActivityTime = DateTime.Now;
    lock (m_parent.FileData)
    {
        // Position the stream at this block's write cursor and append the point's binary image.
        m_parent.FileData.Seek(m_writeCursor, SeekOrigin.Begin);
        m_parent.FileData.Write(dataPoint.BinaryImage, 0, ArchiveData.ByteCount);

        // Advance the write cursor past the data just written.
        m_writeCursor = m_parent.FileData.Position;

        // Flush the data if write caching is disabled.
        if (!m_parent.CacheWrites)
            m_parent.FileData.Flush();
    }
}
/// <summary>
/// Reads existing <see cref="ArchiveData"/> points from the <see cref="ArchiveDataBlock"/>.
/// Lazily enumerates points from the block's start until an empty point or the block's
/// capacity is reached; also advances <c>m_writeCursor</c> past the last non-empty point so
/// subsequent writes append after existing data.
/// </summary>
/// <returns>Returns <see cref="ArchiveData"/> points from the <see cref="ArchiveDataBlock"/>.</returns>
public IEnumerable<ArchiveData> Read()
{
    // NOTE(review): this is an iterator, so the lock below is acquired on first MoveNext() and
    // held across each 'yield return' — i.e. for as long as the caller is enumerating. Confirm
    // that consumers enumerate promptly, or FileData stays locked for the duration.
    lock (m_parent.FileData)
    {
        // We'll start reading from where the data block begins.
        m_parent.FileData.Seek(Location, SeekOrigin.Begin);
        byte[] binaryImage = new byte[ArchiveData.ByteCount];
        for (int i = 1; i <= Capacity; i++)
        {
            // Read the data in the block.
            m_lastActivityTime = DateTime.Now;
            // NOTE(review): Stream.Read() return value is ignored — a short read would leave stale
            // bytes in binaryImage; presumably block data is always fully present on disk — verify.
            m_parent.FileData.Read(binaryImage, 0, binaryImage.Length);
            ArchiveData dataPoint = new ArchiveData(m_historianID, binaryImage, 0, binaryImage.Length);
            if (!dataPoint.IsEmpty)
            {
                // There is data - use it. Track the position after it as the next write cursor.
                m_writeCursor = m_parent.FileData.Position;
                yield return dataPoint;
            }
            else
            {
                // Data is empty - stop reading (an empty point marks the end of used slots).
                yield break;
            }
        }
    }
}