/// <summary>
/// Creates a connected FTP session state bound to the given client and control channel.
/// </summary>
internal FtpSessionConnected(FtpClient h, FtpControlChannel ctrl, bool caseInsensitive)
{
    m_caseInsensitive = caseInsensitive;
    m_host = h;
    m_ctrlChannel = ctrl;

    // Point the control channel back at this session so it can reach session-level state.
    m_ctrlChannel.Session = this;
}
/// <summary>
/// Creates a control channel for the given FTP client host with default connection settings.
/// </summary>
internal FtpControlChannel(FtpClient host)
{
    m_sessionHost = host;
    m_currentTransferMode = TransferMode.Unknown;

    // Default target: standard FTP control port on the local machine, until overridden.
    m_server = "localhost";
    m_port = 21;
    m_connection = new TcpClient();
}
/// <summary>
/// Replicates all "*_to_*.d" archive files found under <paramref name="replicationFolder"/> to the
/// HDFS-backed FTP server, recording the outcome of each file in <paramref name="replicationLog"/>.
/// Per-file failures are reported via <c>OnReplicationException</c> and do not stop the loop
/// (except <see cref="ThreadAbortException"/>, which is re-thrown).
/// </summary>
/// <param name="ftpClient">Connected FTP client whose current directory is the upload target.</param>
/// <param name="replicationFolder">Local folder searched recursively for archive files to upload.</param>
/// <param name="replicationLog">In-memory log table; one row per file with hash/transfer statistics.</param>
private void ReplicateToHadoop(FtpClient ftpClient, string replicationFolder, DataTable replicationLog)
{
    WriteTrace("Replicating folder '{0}'", FilePath.TrimFileName(replicationFolder, FilePathTrimLength));

    // Create list of files to be replicated (sorted so uploads happen in a deterministic order).
    List<string> files = new List<string>(Directory.GetFiles(replicationFolder, "*_to_*.d", SearchOption.AllDirectories));
    files.Sort();

    // Process all the files in the list.
    WriteTrace("Found {0} files in folder", files.Count);

    foreach (string file in files)
    {
        // Initialize local variables.
        int requests = int.MinValue;
        bool uploading = false;
        double hashingStartTime = double.MinValue;
        double hashingTotalTime = double.MinValue;
        double transferStartTime = double.MinValue;
        double transferTotalTime = double.MinValue;
        string justFileName = FilePath.GetFileName(file);
        DataRow record = null;
        // Look up any existing log entry for this file name.
        // NOTE(review): the file name is injected into the filter expression unescaped - a name
        // containing a single quote would break DataTable.Select; confirm names are always safe.
        DataRow[] filter = replicationLog.Select(string.Format("FileName ='{0}'", justFileName));
        FileInfo fileInfo = new FileInfo(file);

        WriteTrace("Replicating file '{0}' of size {1:0,0} KB", FilePath.TrimFileName(file, FilePathTrimLength), Convert.ToInt32(fileInfo.Length / 1024D));

        try
        {
            // Continue to "ping" FTP server so that it knows we are alive and well.
            ftpClient.ControlChannel.Command("NOOP");

            // Compute HDFS file hash of the local file, timing the operation for the log.
            WriteTrace("Hashing file");
            hashingStartTime = Common.SystemTimer;
            byte[] localHash = ComputeHdfsFileHash(file, m_bytesPerCrc32, m_hdfsBlockSize, m_applyBufferPadding);
            hashingTotalTime = Common.SystemTimer - hashingStartTime;
            WriteTrace("File hashed in {0} seconds", Convert.ToInt32(hashingTotalTime));

            // Check if file is to be uploaded: upload when the file was never replicated,
            // the last attempt failed, or its content hash has changed since the last pass.
            if (filter.Length == 0 || filter[0]["FileSync"].ToString() == "Fail" || localHash.CompareTo(ByteEncoding.Hexadecimal.GetBytes(filter[0]["FileHash"].ToString())) != 0)
            {
                // Upload file to HDFS since:
                // 1) File has not been replicated previously.
                // OR
                // 2) File has been replicated in the past, but its content has changed since then.
                uploading = true;
                WriteTrace("Uploading file");
                transferStartTime = Common.SystemTimer;
                ftpClient.CurrentDirectory.PutFile(file);
                transferTotalTime = Common.SystemTimer - transferStartTime;
                WriteTrace("File uploaded in {0} seconds", Convert.ToInt32(transferTotalTime));

                // Request file hash from HDFS, retrying up to m_hashRequestAttempts times.
                for (requests = 1; requests <= m_hashRequestAttempts; requests++)
                {
                    try
                    {
                        // Wait before request so the server has time to finish ingesting the file.
                        WriteTrace("Waiting {0} seconds before HDFS hash request", m_hashRequestWaitTime / 1000);
                        Thread.Sleep(m_hashRequestWaitTime);

                        // Request file hash.
                        WriteTrace("Requesting HDFS hash (Attempt {0})", requests);
                        ftpClient.ControlChannel.Command(string.Format("HDFSCHKSM {0}{1}", ftpClient.CurrentDirectory.FullPath, justFileName));
                        WriteTrace("Hash request response - {0}", ftpClient.ControlChannel.LastResponse.Message.RemoveCrLfs());

                        // Exit when successful.
                        if (ftpClient.ControlChannel.LastResponse.Code == 200)
                            break;
                    }
                    catch (Exception ex)
                    {
                        // Try again - Apache MINA FTP server acts funny with upload & hash check of large files.
                        WriteTrace("Hash request error - {0}", ex.Message);
                    }
                }

                // Initialize replication log entry - reuse the existing row or add a new one.
                if (filter.Length > 0)
                {
                    record = filter[0];
                }
                else
                {
                    record = replicationLog.NewRow();
                    replicationLog.Rows.Add(record);
                }

                // Update replication log entry.
                record["DateTime"] = DateTime.UtcNow;
                record["FileName"] = justFileName;
                record["FileHash"] = ByteEncoding.Hexadecimal.GetString(localHash);
                record["HashingTime"] = hashingTotalTime.ToString("0.000");
                record["TransferTime"] = transferTotalTime.ToString("0.000");
                record["TransferRate"] = ((new FileInfo(file).Length / SI2.Kilo) / transferTotalTime).ToString("0.00");
                record["ServerRequests"] = requests < m_hashRequestAttempts ? requests : m_hashRequestAttempts;
                record["ServerResponse"] = ftpClient.ControlChannel.LastResponse.Message.RemoveCrLfs();

                // Compare local and HDFS hash.
                // NOTE(review): the response is assumed to be "<label>:<hex hash>" - confirm
                // the server always includes exactly one colon before the hash.
                if (ftpClient.ControlChannel.LastResponse.Code == 200 && localHash.CompareTo(ByteEncoding.Hexadecimal.GetBytes(ftpClient.ControlChannel.LastResponse.Message.RemoveCrLfs().Split(':')[1])) == 0)
                {
                    // File uploaded and hashes match.
                    record["FileSync"] = "Pass";
                    WriteTrace("Replication successful");

                    // Delete original file after replication, when so configured.
                    if (m_deleteOriginalFiles)
                        DeleteOriginalFile(file, fileInfo);

                    // Notify about the successful replication.
                    OnReplicationProgress(new ProcessProgress<int>("ReplicateArchive", justFileName, 1, 1));
                }
                else
                {
                    // Hashes are different - possible causes:
                    // 1) Local file got modified after hash was computed locally.
                    // OR
                    // 2) Local and remote hashing algorithms are not the same.
                    record["FileSync"] = "Fail";
                    WriteTrace("Replication unsuccessful");

                    // Throwing here routes control to the catch block below, which deletes
                    // the partial upload and reports the failure.
                    throw new InvalidDataException("File hash mismatch");
                }

                // Write replication entry to the log file after every successful file so
                // progress survives a crash mid-folder.
                WriteTrace("Updating replication log file");
                replicationLog.WriteXml(FilePath.GetAbsolutePath(ReplicationLogFile));
                WriteTrace("Replication log file updated");
            }
            else
            {
                WriteTrace("Replication skipped - file content unchanged");

                // Delete original file if its deletion was skipped previously.
                if (m_deleteOriginalFiles)
                    DeleteOriginalFile(file, fileInfo);
            }
        }
        catch (Exception ex)
        {
            WriteTrace("Replication error - {0}", ex.Message);

            // Delete file from FTP site if an exception is encountered when processing the file.
            try
            {
                if (uploading && ftpClient.IsConnected)
                {
                    WriteTrace("Deleting partial upload");
                    ftpClient.CurrentDirectory.RemoveFile(justFileName);
                    WriteTrace("Partial upload deleted");
                }
            }
            catch (Exception exDelete)
            {
                // Best-effort cleanup - a failed delete should not mask the original error.
                WriteTrace("Delete error - {0}", exDelete.Message);
            }

            if (ex is ThreadAbortException)
                // Re-throw the encountered exception.
                throw;
            else
                // Notify about the encountered exception and continue with the next file.
                OnReplicationException(ex);
        }
    }

    WriteTrace("Folder '{0}' replicated", FilePath.TrimFileName(replicationFolder, FilePathTrimLength));
}
/// <summary>
/// Replicates the <see cref="TVA.Historian.IArchive"/>.
/// </summary>
protected override void ReplicateArchive()
{
    WriteTrace("Archive replication started");

    // Parse FTP client information from the replica URI (ftp://user:pass@host:port/path).
    Uri replicaUri = new Uri(ReplicaLocation);
    string[] credentials = replicaUri.UserInfo.Split(':');

    // Ensure credentials are supplied.
    if (credentials.Length != 2)
        throw new ArgumentException("FTP credentials are missing in ReplicaLocation.");

    // Create FTP client for uploading.
    FtpClient ftpClient = new FtpClient();
    ftpClient.Server = replicaUri.Host;
    ftpClient.Port = replicaUri.Port;
    ftpClient.FileTransferProgress += FtpClient_FileTransferProgress;

    // Initialize the replication log schema, loading any previously persisted entries.
    WriteTrace("Initializing archive replication log");
    DataTable replicationLog = new DataTable("ReplicationRecord");

    foreach (string column in new string[] { "DateTime", "FileName", "FileHash", "FileSync", "HashingTime", "TransferTime", "TransferRate", "ServerRequests", "ServerResponse" })
        replicationLog.Columns.Add(column);

    if (File.Exists(FilePath.GetAbsolutePath(ReplicationLogFile)))
        replicationLog.ReadXml(FilePath.GetAbsolutePath(ReplicationLogFile));

    WriteTrace("Archive replication log initialized");

    try
    {
        // Connect FTP client to server.
        WriteTrace("Connecting to ftp://{0}:{1}", ftpClient.Server, ftpClient.Port);
        ftpClient.Connect(credentials[0], credentials[1]);
        WriteTrace("Connection successful");

        // NOTE(review): trace reports the escaped AbsolutePath while the unescaped
        // LocalPath is what is actually set - confirm both resolve identically here.
        WriteTrace("Changing current directory to '{0}'", replicaUri.AbsolutePath);
        ftpClient.SetCurrentDirectory(replicaUri.LocalPath);
        WriteTrace("Current directory changed to '{0}'", ftpClient.CurrentDirectory.FullPath);

        // Process all archive location(s).
        foreach (string folder in ArchiveLocation.Split(';'))
        {
            if (!string.IsNullOrEmpty(folder))
                ReplicateToHadoop(ftpClient, folder.Trim(), replicationLog);
        }
    }
    finally
    {
        // Always release the connection and persist the log, even when replication fails.
        ftpClient.Dispose();
        replicationLog.WriteXml(FilePath.GetAbsolutePath(ReplicationLogFile));
    }

    WriteTrace("Archive replication complete");
}
/// <summary>
/// Releases the unmanaged resources used by the <see cref="FtpSessionDisconnected"/> object and optionally releases the managed resources.
/// </summary>
/// <param name="disposing">true to release both managed and unmanaged resources; false to release only unmanaged resources.</param>
protected virtual void Dispose(bool disposing)
{
    if (m_disposed)
        return;

    try
    {
        if (disposing)
            m_host = null;
    }
    finally
    {
        m_disposed = true; // Guard against repeated disposal.
    }
}
/// <summary>
/// Creates a disconnected FTP session state for the given client.
/// </summary>
internal FtpSessionDisconnected(FtpClient h, bool caseInsensitive)
{
    m_caseInsensitive = caseInsensitive;
    m_host = h;

    // Standard FTP control port until a caller overrides it.
    m_port = 21;
}
/// <summary>
/// Releases the unmanaged resources used by the <see cref="FtpSessionConnected"/> object and optionally releases the managed resources.
/// </summary>
/// <param name="disposing">true to release both managed and unmanaged resources; false to release only unmanaged resources.</param>
protected virtual void Dispose(bool disposing)
{
    if (m_disposed)
        return;

    try
    {
        if (disposing)
        {
            // Drop object references first, then tear down the control channel and data stream.
            m_host = null;
            m_root = null;
            m_current = null;

            if (m_ctrlChannel != null)
                m_ctrlChannel.Close();

            m_ctrlChannel = null;

            if (m_dataStream != null)
                m_dataStream.Dispose();

            m_dataStream = null;
        }
    }
    finally
    {
        m_disposed = true; // Guard against repeated disposal.
    }
}
/// <summary>
/// Clones FTP session used by file watcher so it can be used for other purposes.
/// </summary>
/// <returns>New connected FTP session matching settings defined for FTP file watcher.</returns>
public virtual FtpClient CloneFtpSession()
{
    // This method is just for convenience. We can't allow the end user to use the
    // actual internal directory for sending files or other work because it is
    // constantly being refreshed/used etc., so we instead create a new FTP Session
    // based on the current internal session and watch directory information
    FtpClient newSession = new FtpClient(m_session.CaseInsensitive);

    newSession.Server = m_session.Server;

    // BUG FIX: carry the port over as well - previously a watcher configured for a
    // non-standard FTP port produced a clone that connected to the default port.
    newSession.Port = m_session.Port;

    newSession.Connect(m_username, m_password);
    newSession.SetCurrentDirectory(m_watchDirectory);

    return newSession;
}
/// <summary>
/// Releases the unmanaged resources used by the <see cref="FtpFileWatcher"/> object and optionally releases the managed resources.
/// </summary>
/// <param name="disposing">true to release both managed and unmanaged resources; false to release only unmanaged resources.</param>
protected override void Dispose(bool disposing)
{
    try
    {
        if (!m_disposed && disposing)
        {
            Close();

            // Detach event handlers before disposing so no callbacks fire during teardown.
            if (m_session != null)
            {
                m_session.CommandSent -= OnCommandSent;
                m_session.ResponseReceived -= OnResponseReceived;
                m_session.Dispose();
            }

            m_session = null;

            if (m_watchTimer != null)
            {
                m_watchTimer.Elapsed -= WatchTimer_Elapsed;
                m_watchTimer.Dispose();
            }

            m_watchTimer = null;

            if (m_restartTimer != null)
            {
                m_restartTimer.Elapsed -= RestartTimer_Elapsed;
                m_restartTimer.Dispose();
            }

            m_restartTimer = null;
        }

        m_disposed = true;
    }
    finally
    {
        base.Dispose(disposing);
    }
}
/// <summary>
/// Constructs a new FTP file watcher using the default settings.
/// </summary>
public FtpFileWatcher()
    : base()
{
    m_enabled = true;
    m_notifyOnComplete = true;
    m_currentFiles = new List<FtpFile>();
    m_newFiles = new List<FtpFile>();

    m_session = new FtpClient(false);
    m_session.CommandSent += OnCommandSent;
    m_session.ResponseReceived += OnResponseReceived;

    // Timer that polls the watch directory for new files (5 second interval, started on demand).
    m_watchTimer = new System.Timers.Timer
    {
        AutoReset = false,
        Interval = 5000,
        Enabled = false
    };
    m_watchTimer.Elapsed += WatchTimer_Elapsed;

    // Timer that retries the FTP connection after an availability failure (10 second delay).
    m_restartTimer = new System.Timers.Timer
    {
        AutoReset = false,
        Interval = 10000,
        Enabled = false
    };
    m_restartTimer.Elapsed += RestartTimer_Elapsed;
}