public bool VerifyChecksum()
{
    uint previousChecksum = LastChecksum;
    LastChecksum = FileChecksum.Calculate(FullPath);
    // Returns true if the file's checksum has changed since the last calculation.
    return LastChecksum != previousChecksum;
}
public bool IsMet(IUpdateTask task)
{
    var localPath = !string.IsNullOrEmpty(LocalPath)
        ? LocalPath
        : Utils.Reflection.GetNauAttribute(task, "LocalPath") as string;

    // The local path is invalid; there is nothing to check, so treat the condition as met
    if (string.IsNullOrEmpty(localPath))
    {
        return true;
    }

    // If the local file does not exist, the checksums vacuously don't match
    if (!File.Exists(localPath))
    {
        return false;
    }

    if ("sha256".Equals(ChecksumType, StringComparison.InvariantCultureIgnoreCase))
    {
        var sha256 = FileChecksum.GetSHA256Checksum(localPath);
        if (!string.IsNullOrEmpty(sha256) && sha256.Equals(Checksum, StringComparison.InvariantCultureIgnoreCase))
        {
            return true;
        }
    }

    // TODO: Support more checksum algorithms (although SHA256 isn't known to have collisions, others are more commonly used)
    return false;
}
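The FileChecksum helper type these examples call into is not included in this listing. Below is a minimal sketch of what such a helper could look like, assuming Calculate produces a CRC32-style uint (to match the uint comparison in VerifyChecksum above) and GetSHA256Checksum a lowercase hex digest (to match the case-insensitive comparison in IsMet); the bodies are illustrative, not the actual implementation.

using System;
using System.IO;
using System.Security.Cryptography;

// Hypothetical sketch of the FileChecksum helper used throughout these snippets.
public static class FileChecksum
{
    // Assumed: a CRC32-style checksum of the file contents, returned as a uint.
    public static uint Calculate(string path)
    {
        uint crc = 0xFFFFFFFFu;
        foreach (byte b in File.ReadAllBytes(path))
        {
            crc ^= b;
            for (int i = 0; i < 8; i++)
                crc = (crc & 1) != 0 ? (crc >> 1) ^ 0xEDB88320u : crc >> 1;
        }
        return ~crc;
    }

    // Assumed: the SHA-256 digest of the file contents as a lowercase hex string.
    public static string GetSHA256Checksum(string path)
    {
        using (var sha = SHA256.Create())
        using (var stream = File.OpenRead(path))
        {
            byte[] hash = sha.ComputeHash(stream);
            return BitConverter.ToString(hash).Replace("-", "").ToLowerInvariant();
        }
    }
}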
public virtual void TestEncryptedReadWithRC4()
{
    MiniDFSCluster cluster = null;
    try
    {
        Configuration conf = new Configuration();
        cluster = new MiniDFSCluster.Builder(conf).Build();
        FileSystem fs = GetFileSystem(conf);
        WriteTestDataToFile(fs);
        NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
        FileChecksum checksum = fs.GetFileChecksum(TestPath);
        fs.Close();
        cluster.Shutdown();
        SetEncryptionConfigKeys(conf);
        // It'll use 3DES by default, but we set it to rc4 here.
        conf.Set(DFSConfigKeys.DfsDataEncryptionAlgorithmKey, "rc4");
        cluster = new MiniDFSCluster.Builder(conf).ManageDataDfsDirs(false).ManageNameDfsDirs(false)
            .Format(false).StartupOption(HdfsServerConstants.StartupOption.Regular).Build();
        fs = GetFileSystem(conf);
        GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer.CaptureLogs(
            LogFactory.GetLog(typeof(SaslDataTransferServer)));
        GenericTestUtils.LogCapturer logs1 = GenericTestUtils.LogCapturer.CaptureLogs(
            LogFactory.GetLog(typeof(DataTransferSaslUtil)));
        try
        {
            NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
            NUnit.Framework.Assert.AreEqual(checksum, fs.GetFileChecksum(TestPath));
        }
        finally
        {
            logs.StopCapturing();
            logs1.StopCapturing();
        }
        fs.Close();
        if (resolverClazz == null)
        {
            // Test client and server negotiate cipher option
            GenericTestUtils.AssertDoesNotMatch(logs.GetOutput(), "Server using cipher suite");
            // Check the IOStreamPair
            GenericTestUtils.AssertDoesNotMatch(logs1.GetOutput(),
                "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
        }
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
public FileInfoEx(string fileName, int rootDirectoryLength)
{
    myFileInfo = new FileInfo(fileName);
    myFileVersion = FileVersionInfo.GetVersionInfo(fileName).FileVersion;
    if (myFileVersion != null)
    {
        myFileVersion = myFileVersion.Replace(", ", ".");
    }
    myHash = FileChecksum.GetSHA256Checksum(fileName);
    RelativeName = fileName.Substring(rootDirectoryLength + 1);
}
public override void Prepare(Sources.IUpdateSource source)
{
    if (string.IsNullOrEmpty(LocalPath))
    {
        UpdateManager.Instance.Logger.Log(Logger.SeverityLevel.Warning,
            "FileUpdateTask: LocalPath is empty, task is a noop");
        // Erroneous case, but there's nothing to prepare, and by default we prefer a noop over an error
        return;
    }

    string fileName;
    if (!string.IsNullOrEmpty(UpdateTo))
    {
        fileName = UpdateTo;
    }
    else
    {
        fileName = LocalPath;
    }

    _tempFile = null;
    string baseUrl = UpdateManager.Instance.BaseUrl;
    string tempFileLocal = Path.Combine(UpdateManager.Instance.Config.TempFolder, Guid.NewGuid().ToString());

    UpdateManager.Instance.Logger.Log("FileUpdateTask: Downloading {0} with BaseUrl of {1} to {2}",
        fileName, baseUrl, tempFileLocal);

    if (!source.GetData(fileName, baseUrl, OnProgress, ref tempFileLocal))
    {
        throw new UpdateProcessFailedException("FileUpdateTask: Failed to get file from source");
    }

    _tempFile = tempFileLocal;
    if (_tempFile == null)
    {
        throw new UpdateProcessFailedException("FileUpdateTask: Failed to get file from source");
    }

    if (!string.IsNullOrEmpty(Sha256Checksum))
    {
        string checksum = FileChecksum.GetSHA256Checksum(_tempFile);
        if (!checksum.Equals(Sha256Checksum))
        {
            throw new UpdateProcessFailedException(string.Format(
                "FileUpdateTask: Checksums do not match; expected {0} but got {1}",
                Sha256Checksum, checksum));
        }
    }

    _destinationFile = Path.Combine(Path.GetDirectoryName(UpdateManager.Instance.ApplicationPath), LocalPath);
    UpdateManager.Instance.Logger.Log("FileUpdateTask: Prepared successfully; destination file: {0}",
        _destinationFile);
}
/// <summary>
/// Converts a <code>FileChecksum</code> object into a JSON object.
/// </summary>
/// <param name="checksum">file checksum.</param>
/// <returns>The JSON representation of the file checksum.</returns>
private static IDictionary FileChecksumToJSON(FileChecksum checksum)
{
    IDictionary json = new LinkedHashMap();
    json[HttpFSFileSystem.ChecksumAlgorithmJson] = checksum.GetAlgorithmName();
    json[HttpFSFileSystem.ChecksumBytesJson] = StringUtils.ByteToHexString(checksum.GetBytes());
    json[HttpFSFileSystem.ChecksumLengthJson] = checksum.GetLength();
    IDictionary response = new LinkedHashMap();
    response[HttpFSFileSystem.FileChecksumJson] = json;
    return response;
}
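For reference, once serialized this map yields a WebHDFS-style GETFILECHECKSUM payload, roughly of the form {"FileChecksum": {"algorithm": "MD5-of-0MD5-of-512CRC32", "bytes": "...", "length": 28}}; the exact key names come from the HttpFSFileSystem constants used above, so treat this shape as illustrative rather than definitive.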
public FileInfoEx(string fileName, int rootDirLength)
{
    myFileInfo = new FileInfo(fileName);
    var verInfo = FileVersionInfo.GetVersionInfo(fileName);
    // Check verInfo.FileVersion here: myFileVersion is still unassigned at this point,
    // so testing it would always be false and the version would never be set.
    if (verInfo.FileVersion != null)
    {
        myFileVersion = new System.Version(verInfo.FileMajorPart, verInfo.FileMinorPart,
            verInfo.FileBuildPart, verInfo.FilePrivatePart).ToString();
    }
    myHash = FileChecksum.GetSHA256Checksum(fileName);
    RelativeName = fileName.Substring(rootDirLength);
}
/// <exception cref="Org.Xml.Sax.SAXException"/> public override void StartElement(string ns, string localname, string qname, Attributes attrs) { if (!typeof(MD5MD5CRC32FileChecksum).FullName.Equals(qname)) { if (typeof(RemoteException).Name.Equals(qname)) { throw new SAXException(RemoteException.ValueOf(attrs)); } throw new SAXException("Unrecognized entry: " + qname); } this.filechecksum = MD5MD5CRC32FileChecksum.ValueOf(attrs); }
public FileSystemScriptFiles(FileChecksum fileChecksum, ScriptFileTypeBase scriptFileType, string folderPath)
{
    fileChecksum.ThrowIfNull(nameof(fileChecksum));
    scriptFileType.ThrowIfNull(nameof(scriptFileType));
    folderPath.ThrowIfNull(nameof(folderPath));

    _fileChecksum = fileChecksum;
    ScriptFileType = scriptFileType;
    FolderPath = folderPath;
    Load();
}
private void AircraftTypeCodeUpdate()
{
    try
    {
        var client = new RestClient(ROOT_URL);
        var request = new RestRequest(AIRCRAFT_DB_ENDPOINT, DataFormat.Json);
        var response = client.Execute(request).Content;
        var data = JsonConvert.DeserializeObject<AircraftTypeCodes>(response);
        if (data != null)
        {
            string localPath = Path.Combine(mConfig.AppPath, "TypeCodes.json");
            // Download only when the local copy is missing or its checksum differs
            // from the server's (the two original branches were identical).
            bool needsUpdate = !File.Exists(localPath)
                || FileChecksum.CalculateCheckSum(localPath) != data.ChecksumHash;
            if (needsUpdate)
            {
                using (WebClient wc = new WebClient())
                {
                    var json = wc.DownloadString(data.TypeCodesUrl);
                    if (json != null)
                    {
                        File.WriteAllText(localPath, json);
                        NotificationPosted?.Invoke(this, new NotificationPostedEventArgs(
                            NotificationType.Info, "Aircraft type code database updated."));
                    }
                }
            }
        }
    }
    catch (Exception ex)
    {
        NotificationPosted?.Invoke(this, new NotificationPostedEventArgs(
            NotificationType.Error, $"Error downloading aircraft type code database: {ex.Message}"));
    }
}
public virtual void TestLongLivedClient()
{
    MiniDFSCluster cluster = null;
    try
    {
        Configuration conf = new Configuration();
        cluster = new MiniDFSCluster.Builder(conf).Build();
        FileSystem fs = GetFileSystem(conf);
        WriteTestDataToFile(fs);
        NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
        FileChecksum checksum = fs.GetFileChecksum(TestPath);
        fs.Close();
        cluster.Shutdown();
        SetEncryptionConfigKeys(conf);
        cluster = new MiniDFSCluster.Builder(conf).ManageDataDfsDirs(false).ManageNameDfsDirs(false)
            .Format(false).StartupOption(HdfsServerConstants.StartupOption.Regular).Build();
        BlockTokenSecretManager btsm = cluster.GetNamesystem().GetBlockManager().GetBlockTokenSecretManager();
        btsm.SetKeyUpdateIntervalForTesting(2 * 1000);
        btsm.SetTokenLifetime(2 * 1000);
        btsm.ClearAllKeysForTesting();
        fs = GetFileSystem(conf);
        NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
        NUnit.Framework.Assert.AreEqual(checksum, fs.GetFileChecksum(TestPath));
        // Sleep for 15 seconds, after which the encryption key will no longer be
        // valid. It needs to be a few multiples of the block token lifetime,
        // since several block tokens are valid at any given time (the current
        // and the last two, by default).
        Log.Info("Sleeping so that encryption keys expire...");
        Sharpen.Thread.Sleep(15 * 1000);
        Log.Info("Done sleeping.");
        NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
        NUnit.Framework.Assert.AreEqual(checksum, fs.GetFileChecksum(TestPath));
        fs.Close();
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <exception cref="System.Exception"/> public virtual void TestGetFileChecksum(Path foo, int appendLength) { int appendRounds = 16; FileChecksum[] fc = new FileChecksum[appendRounds + 1]; DFSTestUtil.CreateFile(dfs, foo, appendLength, Replication, 0L); fc[0] = dfs.GetFileChecksum(foo); for (int i = 0; i < appendRounds; i++) { DFSTestUtil.AppendFile(dfs, foo, appendLength); fc[i + 1] = dfs.GetFileChecksum(foo); } for (int i_1 = 0; i_1 < appendRounds + 1; i_1++) { FileChecksum checksum = dfs.GetFileChecksum(foo, appendLength * (i_1 + 1)); NUnit.Framework.Assert.IsTrue(checksum.Equals(fc[i_1])); } }
/// <exception cref="System.IO.IOException"/> protected internal override void ProcessPath(PathData item) { if (item.stat.IsDirectory()) { throw new PathIsDirectoryException(item.ToString()); } FileChecksum checksum = item.fs.GetFileChecksum(item.path); if (checksum == null) { @out.Printf("%s\tNONE\t%n", item.ToString()); } else { string checksumString = StringUtils.ByteToHexString(checksum.GetBytes(), 0, checksum .GetLength()); @out.Printf("%s\t%s\t%s%n", item.ToString(), checksum.GetAlgorithmName(), checksumString ); } }
/// <exception cref="System.Exception"/> private void TestChecksum() { if (!IsLocalFS()) { FileSystem fs = FileSystem.Get(GetProxiedFSConf()); fs.Mkdirs(GetProxiedFSTestDir()); Path path = new Path(GetProxiedFSTestDir(), "foo.txt"); OutputStream os = fs.Create(path); os.Write(1); os.Close(); FileChecksum hdfsChecksum = fs.GetFileChecksum(path); fs.Close(); fs = GetHttpFSFileSystem(); FileChecksum httpChecksum = fs.GetFileChecksum(path); fs.Close(); NUnit.Framework.Assert.AreEqual(httpChecksum.GetAlgorithmName(), hdfsChecksum.GetAlgorithmName ()); NUnit.Framework.Assert.AreEqual(httpChecksum.GetLength(), hdfsChecksum.GetLength( )); Assert.AssertArrayEquals(httpChecksum.GetBytes(), hdfsChecksum.GetBytes()); } }
public async Task SendFileAsync(FileTransferData data)
{
    string futureFilePath =
        $"{(string.IsNullOrWhiteSpace(data.ControlSpace.Name) ? data.ControlSpace.ID.ToString() : data.ControlSpace.Name)}/" +
        $"{((data.Path != null) ? data.Path.Trim('/') + "/" : "")}";
    string fullFilename = futureFilePath + data.FileName;
    if (File.Exists(fullFilename))
    {
        // Skip the write when the incoming hash matches the file already on disk.
        if (data.Hash == FileChecksum.Calculate(fullFilename))
        {
#if DEBUG
            Console.WriteLine($"File {data.Path}\\{data.FileName} already exists");
#endif
            return;
        }
    }
    Directory.CreateDirectory(futureFilePath);
    File.WriteAllBytes(fullFilename, data.FileData);
#if DEBUG
    Console.WriteLine($"{data.Path}\\{data.FileName} transferred");
#endif
}
public virtual void TestGetFileChecksum()
{
    // Create two different files in HDFS
    fileSystemTestHelper.CreateFile(fHdfs, someFile);
    FileSystemTestHelper.CreateFile(fHdfs,
        fileSystemTestHelper.GetTestRootPath(fHdfs, someFile + "other"), 1, 512);
    // Get checksum through ViewFS
    FileChecksum viewFSCheckSum = vfs.GetFileChecksum(new Path("/vfstmp/someFileForTestGetFileChecksum"));
    // Get checksum through HDFS
    FileChecksum hdfsCheckSum = fHdfs.GetFileChecksum(new Path(someFile));
    // Get checksum of a different file in HDFS
    FileChecksum otherHdfsFileCheckSum = fHdfs.GetFileChecksum(new Path(someFile + "other"));
    // Checksums of the same file (obtained through HDFS and ViewFS) should be the same
    NUnit.Framework.Assert.AreEqual("HDFS and ViewFS checksums were not the same",
        viewFSCheckSum, hdfsCheckSum);
    // Checksums of different files should be different
    NUnit.Framework.Assert.IsFalse("Some other HDFS file which should not have had the same "
        + "checksum as viewFS did!", viewFSCheckSum.Equals(otherHdfsFileCheckSum));
}
public virtual void TestLongLivedReadClientAfterRestart()
{
    MiniDFSCluster cluster = null;
    try
    {
        Configuration conf = new Configuration();
        cluster = new MiniDFSCluster.Builder(conf).Build();
        FileSystem fs = GetFileSystem(conf);
        WriteTestDataToFile(fs);
        NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
        FileChecksum checksum = fs.GetFileChecksum(TestPath);
        fs.Close();
        cluster.Shutdown();
        SetEncryptionConfigKeys(conf);
        cluster = new MiniDFSCluster.Builder(conf).ManageDataDfsDirs(false).ManageNameDfsDirs(false)
            .Format(false).StartupOption(HdfsServerConstants.StartupOption.Regular).Build();
        fs = GetFileSystem(conf);
        NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
        NUnit.Framework.Assert.AreEqual(checksum, fs.GetFileChecksum(TestPath));
        // Restart the NN and DN, after which the client's encryption key will no
        // longer be valid.
        cluster.RestartNameNode();
        NUnit.Framework.Assert.IsTrue(cluster.RestartDataNode(0));
        NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
        NUnit.Framework.Assert.AreEqual(checksum, fs.GetFileChecksum(TestPath));
        fs.Close();
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
public virtual void TestEncryptedReadAfterNameNodeRestart()
{
    MiniDFSCluster cluster = null;
    try
    {
        Configuration conf = new Configuration();
        cluster = new MiniDFSCluster.Builder(conf).Build();
        FileSystem fs = GetFileSystem(conf);
        WriteTestDataToFile(fs);
        NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
        FileChecksum checksum = fs.GetFileChecksum(TestPath);
        fs.Close();
        cluster.Shutdown();
        SetEncryptionConfigKeys(conf);
        cluster = new MiniDFSCluster.Builder(conf).ManageDataDfsDirs(false).ManageNameDfsDirs(false)
            .Format(false).StartupOption(HdfsServerConstants.StartupOption.Regular).Build();
        fs = GetFileSystem(conf);
        NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
        NUnit.Framework.Assert.AreEqual(checksum, fs.GetFileChecksum(TestPath));
        fs.Close();
        cluster.RestartNameNode();
        fs = GetFileSystem(conf);
        NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
        NUnit.Framework.Assert.AreEqual(checksum, fs.GetFileChecksum(TestPath));
        fs.Close();
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>Executes the filesystem operation.</summary>
/// <param name="fs">filesystem instance to use.</param>
/// <returns>a Map object (JSON friendly) with the file checksum.</returns>
/// <exception cref="System.IO.IOException">thrown if an IO error occurred.</exception>
public virtual IDictionary Execute(FileSystem fs)
{
    FileChecksum checksum = fs.GetFileChecksum(path);
    return FileChecksumToJSON(checksum);
}
public void ComputeHash()
{
    Hash = FileChecksum.Calculate(FileData);
}
/// <summary>
/// Test that we cannot read a file beyond its snapshot length
/// when accessing it via a snapshot path.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestSnapshotfileLength()
{
    hdfs.Mkdirs(sub);
    int bytesRead;
    byte[] buffer = new byte[Blocksize * 8];
    int origLen = Blocksize + 1;
    int toAppend = Blocksize;
    FSDataInputStream fis = null;
    FileStatus fileStatus = null;
    // Create and write a file.
    Path file1 = new Path(sub, file1Name);
    DFSTestUtil.CreateFile(hdfs, file1, Blocksize, 0, Blocksize, Replication, Seed);
    DFSTestUtil.AppendFile(hdfs, file1, origLen);
    // Create a snapshot on the parent directory.
    hdfs.AllowSnapshot(sub);
    hdfs.CreateSnapshot(sub, snapshot1);
    Path file1snap1 = SnapshotTestHelper.GetSnapshotPath(sub, snapshot1, file1Name);
    FileChecksum snapChksum1 = hdfs.GetFileChecksum(file1snap1);
    Assert.AssertThat("file and snapshot file checksums are not equal",
        hdfs.GetFileChecksum(file1), CoreMatchers.Is(snapChksum1));
    // Append to the file.
    FSDataOutputStream @out = hdfs.Append(file1);
    // Nothing has been appended yet. All checksums should still be equal.
    Assert.AssertThat("file and snapshot checksums (open for append) are not equal",
        hdfs.GetFileChecksum(file1), CoreMatchers.Is(snapChksum1));
    Assert.AssertThat("snapshot checksum (post-open for append) has changed",
        hdfs.GetFileChecksum(file1snap1), CoreMatchers.Is(snapChksum1));
    try
    {
        AppendTestUtil.Write(@out, 0, toAppend);
        // Test reading from a snapshot of a file that is open for append
        byte[] dataFromSnapshot = DFSTestUtil.ReadFileBuffer(hdfs, file1snap1);
        Assert.AssertThat("Wrong data size in snapshot.",
            dataFromSnapshot.Length, CoreMatchers.Is(origLen));
        // Verify that the checksum didn't change
        Assert.AssertThat("snapshot file checksum (pre-close) has changed",
            hdfs.GetFileChecksum(file1), CoreMatchers.Is(snapChksum1));
        Assert.AssertThat("snapshot checksum (post-append) has changed",
            hdfs.GetFileChecksum(file1snap1), CoreMatchers.Is(snapChksum1));
    }
    finally
    {
        @out.Close();
    }
    Assert.AssertThat("file and snapshot file checksums (post-close) are equal",
        hdfs.GetFileChecksum(file1), CoreMatchers.Not(snapChksum1));
    Assert.AssertThat("snapshot file checksum (post-close) has changed",
        hdfs.GetFileChecksum(file1snap1), CoreMatchers.Is(snapChksum1));
    // Make sure we can read the entire file via its non-snapshot path.
    fileStatus = hdfs.GetFileStatus(file1);
    Assert.AssertThat(fileStatus.GetLen(), CoreMatchers.Is((long)origLen + toAppend));
    fis = hdfs.Open(file1);
    bytesRead = fis.Read(0, buffer, 0, buffer.Length);
    Assert.AssertThat(bytesRead, CoreMatchers.Is(origLen + toAppend));
    fis.Close();
    // Try to open the file via its snapshot path.
    fis = hdfs.Open(file1snap1);
    fileStatus = hdfs.GetFileStatus(file1snap1);
    Assert.AssertThat(fileStatus.GetLen(), CoreMatchers.Is((long)origLen));
    // Make sure we can only read up to the snapshot length.
    bytesRead = fis.Read(0, buffer, 0, buffer.Length);
    Assert.AssertThat(bytesRead, CoreMatchers.Is(origLen));
    fis.Close();
    byte[] dataFromSnapshot1 = DFSTestUtil.ReadFileBuffer(hdfs, file1snap1);
    Assert.AssertThat("Wrong data size in snapshot.",
        dataFromSnapshot1.Length, CoreMatchers.Is(origLen));
}
public ScriptFilesAsserts(FileChecksum fileChecksum, DBHandler dbHandler)
{
    _fileChecksum = fileChecksum;
    _dbHandler = dbHandler;
}
public ScriptFilesComparerFactory(FileChecksum fileChecksum)
{
    _fileChecksum = fileChecksum;
}
public FileChecksumResponse(FileChecksum checksum)
{
    Checksum = checksum;
}