/// <summary>
/// Writes the first <paramref name="length"/> bytes of <paramref name="buffer"/>
/// through the wrapped delegate, translating low-level I/O failures into db4o's
/// exception type.
/// </summary>
/// <exception cref="Db4objects.Db4o.Ext.Db4oIOException">when the underlying write fails</exception>
public override void Write(byte[] buffer, int length)
{
    try
    {
        _delegate.Write(buffer, 0, length);
    }
    catch (IOException ioe)
    {
        // Surface db4o's own exception type so callers handle a single hierarchy.
        throw new Db4oIOException(ioe);
    }
}
/// <summary>
/// Writes <paramref name="data"/> to the file named <paramref name="fileName"/> under
/// PATH, creating the directory and the file (chmod 0666) if they do not exist.
/// Best effort: failures are printed and otherwise swallowed.
/// </summary>
/// <param name="fileName">file name appended to PATH</param>
/// <param name="data">bytes to write; null or empty is a no-op</param>
public static void WritFile2(string fileName, byte[] data)
{
    // FIX: the original guard was "data == null && data.Length > 0", which
    // dereferences data right after establishing it is null (NullReferenceException)
    // and never actually skips the empty-payload case it was meant to skip.
    if (data == null || data.Length == 0)
    {
        return;
    }
    Java.IO.File dir = new Java.IO.File(PATH);
    if (!dir.Exists())
    {
        // Result deliberately ignored: a failed mkdir surfaces below when the
        // file itself cannot be created/opened.
        dir.Mkdirs();
    }
    string filePath = PATH + fileName;
    Java.IO.File file = new Java.IO.File(filePath);
    if (!file.Exists())
    {
        try
        {
            file.CreateNewFile();
            // Make the file world-readable/writable (original behavior preserved).
            Runtime runtime = Runtime.GetRuntime();
            runtime.Exec("chmod 0666 " + file);
        }
        catch (Java.Lang.Exception ex)
        {
            ex.PrintStackTrace();
        }
    }
    RandomAccessFile randomAccessFile = null;
    try
    {
        randomAccessFile = new RandomAccessFile(filePath, "rw");
        randomAccessFile.Write(data);
    }
    catch (Java.Lang.Exception e)
    {
        e.PrintStackTrace();
    }
    finally
    {
        try
        {
            if (randomAccessFile != null)
            {
                randomAccessFile.Close();
            }
        }
        catch (Java.Lang.Exception)
        {
            // Ignore close failures on a best-effort write.
        }
    }
}
/// <summary>
/// Replays this recorded write into <paramref name="raf"/> at the stored offset and
/// then writes the captured lock-file and log-file data for <paramref name="path"/>.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
public virtual void Write(string path, RandomAccessFile raf, bool writeTrash)
{
    // At offset zero there is nothing in front of the payload, so trash writing
    // is forced off (same effect as the original in-place reassignment).
    bool trash = writeTrash && _offset != 0;
    raf.Seek(_offset);
    byte[] payload = BytesToWrite(_data, trash);
    raf.Write(payload, 0, _length);
    Write(FileBasedTransactionLogHandler.LockFileName(path), _lockFileData, trash);
    Write(FileBasedTransactionLogHandler.LogFileName(path), _logFileData, trash);
}
/// <summary>
/// Verifies that when the tail of an edits log is corrupted, namenode startup fails
/// with an error message listing the recent opcode offsets.
/// </summary>
public virtual void TestDisplayRecentEditLogOpCodes()
{
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).EnableManagedDfsDirsRedundancy
        (false).Build();
    cluster.WaitActive();
    fileSys = cluster.GetFileSystem();
    FSNamesystem namesystem = cluster.GetNamesystem();
    FSImage fsimage = namesystem.GetFSImage();
    // Generate 20 mkdir operations so the edits log has entries to corrupt.
    for (int i = 0; i < 20; i++)
    {
        fileSys.Mkdirs(new Path("/tmp/tmp" + i));
    }
    // Grab the first edits storage directory before shutting the cluster down.
    Storage.StorageDirectory sd = fsimage.GetStorage().DirIterator(NNStorage.NameNodeDirType
        .Edits).Next();
    cluster.Shutdown();
    FilePath editFile = FSImageTestUtil.FindLatestEditsLog(sd).GetFile();
    NUnit.Framework.Assert.IsTrue("Should exist: " + editFile, editFile.Exists());
    // Corrupt the edits file.
    long fileLen = editFile.Length();
    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
    // Stamp OP_DELETE opcode bytes near the end of the log (starting 40 bytes
    // before EOF) so replay hits an unexpected transaction.
    rwf.Seek(fileLen - 40);
    for (int i_1 = 0; i_1 < 20; i_1++)
    {
        rwf.Write(FSEditLogOpCodes.OpDelete.GetOpCode());
    }
    rwf.Close();
    // Expected failure-message pattern: replay offset, expected txid, and exactly
    // 4 recent opcode offsets.
    StringBuilder bld = new StringBuilder();
    bld.Append("^Error replaying edit log at offset \\d+. ");
    bld.Append("Expected transaction ID was \\d+\n");
    bld.Append("Recent opcode offsets: (\\d+\\s*){4}$");
    try
    {
        // Restart without reformatting (Format(false)) so the corrupted log is read.
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).EnableManagedDfsDirsRedundancy
            (false).Format(false).Build();
        NUnit.Framework.Assert.Fail("should not be able to start");
    }
    catch (IOException e)
    {
        NUnit.Framework.Assert.IsTrue("error message contains opcodes message", e.Message
            .Matches(bld.ToString()));
    }
}
/// <summary>Corrupt a block on a data node.</summary>
/// <remarks>
/// Corrupt a block on a data node. Replace the block file content with content
/// of 1, 2, ...BLOCK_SIZE.
/// </remarks>
/// <param name="block">the ExtendedBlock to be corrupted</param>
/// <param name="dn">the data node where the block needs to be corrupted</param>
/// <exception cref="System.IO.FileNotFoundException"/>
/// <exception cref="System.IO.IOException"/>
private static void CorruptBlock(ExtendedBlock block, DataNode dn)
{
    FilePath blockFile = DataNodeTestUtils.GetBlockFile(dn, block.GetBlockPoolId(), block.GetLocalBlock
        ());
    // Deterministic 0,1,2,... pattern; wrapping past 255 is intentional.
    byte[] pattern = new byte[(int)BlockSize];
    for (int idx = 0; idx < BlockSize; idx++)
    {
        pattern[idx] = unchecked((byte)idx);
    }
    RandomAccessFile writer = new RandomAccessFile(blockFile, "rw");
    writer.Write(pattern);
    writer.Close();
}
/// <summary>
/// Creates <paramref name="newFile"/> and fills it with <paramref name="size"/>
/// random bytes, synced to disk before returning.
/// </summary>
/// <param name="newFile">file to create</param>
/// <param name="size">number of random bytes to write</param>
/// <exception cref="System.IO.IOException"/>
private void CreateFile(FilePath newFile, int size)
{
    // write random data so that filesystems with compression enabled (e.g., ZFS)
    // can't compress the file
    Random random = new Random();
    byte[] data = new byte[size];
    random.NextBytes(data);
    newFile.CreateNewFile();
    RandomAccessFile file = new RandomAccessFile(newFile, "rws");
    try
    {
        file.Write(data);
        // Force file contents to stable storage ("rws" already syncs metadata).
        file.GetFD().Sync();
    }
    finally
    {
        // FIX: close even when Write/Sync throws, so the descriptor is not leaked.
        file.Close();
    }
}
/// <summary>
/// Appends the payload described by <paramref name="data"/> to the end of the last
/// save file. Best effort: failures are deliberately swallowed (original behavior),
/// but the file handle is now always released.
/// </summary>
/// <param name="data">carries the byte buffer, offset and length to append</param>
private void appendFileData(SaveData data)
{
    RandomAccessFile randomAccessFile = null;
    try
    {
        randomAccessFile = new RandomAccessFile(lastSaveFile.AbsolutePath, "rw");
        // Position at current EOF so the write is an append.
        long length = randomAccessFile.Length();
        randomAccessFile.Seek(length);
        randomAccessFile.Write(data.NeedParseData, data.OffSet, data.PlayoffSize);
    }
    catch (Exception)
    {
        // Deliberate best-effort: callers do not observe append failures.
    }
    finally
    {
        // FIX: the original never closed the file, leaking one handle per call.
        if (randomAccessFile != null)
        {
            try
            {
                randomAccessFile.Close();
            }
            catch (Exception)
            {
                // Ignore close failures on a best-effort append.
            }
        }
    }
}
/// <summary>
/// Seeks the underlying file to <paramref name="pos"/> and writes the first
/// <paramref name="length"/> bytes of <paramref name="buffer"/> there.
/// </summary>
/// <exception cref="Db4objects.Db4o.Ext.Db4oIOException">on any underlying I/O failure</exception>
public virtual void Write(long pos, byte[] buffer, int length)
{
    // Fail fast if this adapter has already been closed.
    CheckClosed();
    try
    {
        Seek(pos);
        // Diagnostics hook; no-op unless DTrace has been switched on.
        if (DTrace.enabled)
        {
            DTrace.FileWrite.LogLength(pos, length);
        }
        _file.Write(buffer, 0, length);
    }
    catch (IOException ioe)
    {
        throw new Db4oIOException(ioe);
    }
}
/// <summary>
/// Writes <paramref name="bytes"/> (optionally trashed via BytesToWrite) to
/// <paramref name="fileName"/>, wrapping I/O failures in Db4oException.
/// A null payload is a silent no-op.
/// </summary>
private void Write(string fileName, byte[] bytes, bool writeTrash)
{
    if (bytes == null)
    {
        return;
    }
    try
    {
        RandomAccessFile raf = new RandomAccessFile(fileName, "rw");
        try
        {
            raf.Write(BytesToWrite(bytes, writeTrash));
        }
        finally
        {
            // FIX: the original leaked the file handle when Write threw.
            raf.Close();
        }
    }
    catch (IOException e)
    {
        throw new Db4oException(e);
    }
}
/// <summary>
/// Prepares <paramref name="blockFile"/> by writing a 14-byte all-zero header and
/// records whether checksums are in use.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public override void Setup(FilePath blockFile, bool usingChecksums)
{
    this.usingChecksums = usingChecksums;
    // A fresh byte[] is zero-initialized, matching the original explicit zeros.
    byte[] zeroHeader = new byte[14];
    RandomAccessFile bf = null;
    try
    {
        bf = new RandomAccessFile(blockFile, "rw");
        bf.Write(zeroHeader);
    }
    finally
    {
        if (bf != null)
        {
            bf.Close();
        }
    }
}
/// <summary>
/// Receives a pack into a temporary "incoming_*.pack" file in the object database
/// directory, appends the trailing pack hash, writes the companion .idx file, and
/// renames the pair into their final location.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
public override PackLock Parse(ProgressMonitor receiving, ProgressMonitor resolving
    )
{
    tmpPack = FilePath.CreateTempFile("incoming_", ".pack", db.GetDirectory());
    // The index shares the pack's base name so both can be renamed as a pair.
    tmpIdx = new FilePath(db.GetDirectory(), BaseName(tmpPack) + ".idx");
    try
    {
        @out = new RandomAccessFile(tmpPack, "rw");
        // Base class streams incoming pack data into @out; packEnd/packHash are
        // fields it populates during parsing.
        base.Parse(receiving, resolving);
        @out.Seek(packEnd);
        @out.Write(packHash);
        // Force to disk before the rename so a crash cannot leave a torn pack
        // visible under its final name.
        @out.GetChannel().Force(true);
        @out.Close();
        WriteIdx();
        tmpPack.SetReadOnly();
        tmpIdx.SetReadOnly();
        return (RenameAndOpenPack(GetLockMessage()));
    }
    finally
    {
        if (def != null)
        {
            def.Finish();
        }
        try
        {
            // Close only if still open (the success path already closed it).
            if (@out != null && @out.GetChannel().IsOpen())
            {
                @out.Close();
            }
        }
        catch (IOException)
        {
        }
        // Ignored. We want to delete the file.
        CleanupTemporaryFiles();
    }
}
/// <summary>
/// Write a sequence of bytes to this stream and advance the position of this stream.
/// </summary>
/// <param name="buffer">Source buffer containing the bytes to write</param>
/// <param name="offset">Offset within the buffer</param>
/// <param name="count">Number of bytes to write.</param>
public override void Write(byte[] buffer, int offset, int count)
{
    // Thin pass-through: the underlying file advances its own position as a side
    // effect of the write. No argument validation here — presumably the wrapped
    // file throws on bad offsets/counts; TODO confirm.
    file.Write(buffer, offset, count);
}
/// <summary>
/// Writes the entire contents of <paramref name="buf"/> at absolute file position
/// <paramref name="pos"/>.
/// </summary>
public virtual void Write(long pos, byte[] buf)
{
    int count = buf.Length;
    file.Seek(pos);
    file.Write(buf, 0, count);
}
/// <summary>
/// Exercises pending-replication retry: writes a replicated file, deletes one block
/// replica and corrupts two others, then restarts the cluster and waits for the
/// block to be re-replicated.
/// </summary>
public virtual void TestPendingReplicationRetry()
{
    MiniDFSCluster cluster = null;
    int numDataNodes = 4;
    string testFile = "/replication-test-file";
    Path testPath = new Path(testFile);
    // File payload: 1 KB of '1' characters.
    byte[] buffer = new byte[1024];
    for (int i = 0; i < buffer.Length; i++)
    {
        buffer[i] = (byte)('1');
    }
    try
    {
        Configuration conf = new HdfsConfiguration();
        conf.Set(DFSConfigKeys.DfsReplicationKey, Sharpen.Extensions.ToString(numDataNodes
            ));
        //first time format
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
        cluster.WaitActive();
        DFSClient dfsClient = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort
            ()), conf);
        OutputStream @out = cluster.GetFileSystem().Create(testPath);
        @out.Write(buffer);
        @out.Close();
        WaitForBlockReplication(testFile, dfsClient.GetNamenode(), numDataNodes, -1);
        // get first block of the file.
        ExtendedBlock block = dfsClient.GetNamenode().GetBlockLocations(testFile, 0, long.MaxValue
            ).Get(0).GetBlock();
        cluster.Shutdown();
        // Prepare 25 bytes of '0' used below to overwrite replica contents.
        for (int i_1 = 0; i_1 < 25; i_1++)
        {
            buffer[i_1] = (byte)('0');
        }
        int fileCount = 0;
        // Choose 3 copies of block file - delete 1 and corrupt the remaining 2
        for (int dnIndex = 0; dnIndex < 3; dnIndex++)
        {
            FilePath blockFile = cluster.GetBlockFile(dnIndex, block);
            Log.Info("Checking for file " + blockFile);
            if (blockFile != null && blockFile.Exists())
            {
                if (fileCount == 0)
                {
                    Log.Info("Deleting file " + blockFile);
                    NUnit.Framework.Assert.IsTrue(blockFile.Delete());
                }
                else
                {
                    // corrupt it.
                    Log.Info("Corrupting file " + blockFile);
                    long len = blockFile.Length();
                    NUnit.Framework.Assert.IsTrue(len > 50);
                    RandomAccessFile blockOut = new RandomAccessFile(blockFile, "rw");
                    try
                    {
                        // Overwrite 25 bytes one third of the way into the replica.
                        blockOut.Seek(len / 3);
                        blockOut.Write(buffer, 0, 25);
                    }
                    finally
                    {
                        blockOut.Close();
                    }
                }
                fileCount++;
            }
        }
        NUnit.Framework.Assert.AreEqual(3, fileCount);
        /* Start the MiniDFSCluster with more datanodes since once a writeBlock
         * to a datanode node fails, same block can not be written to it
         * immediately. In our case some replication attempts will fail.
         */
        Log.Info("Restarting minicluster after deleting a replica and corrupting 2 crcs");
        conf = new HdfsConfiguration();
        conf.Set(DFSConfigKeys.DfsReplicationKey, Sharpen.Extensions.ToString(numDataNodes
            ));
        conf.Set(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, Sharpen.Extensions.ToString
            (2));
        conf.Set("dfs.datanode.block.write.timeout.sec", Sharpen.Extensions.ToString(5));
        conf.Set(DFSConfigKeys.DfsNamenodeSafemodeThresholdPctKey, "0.75f");
        // only 3 copies exist
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes * 2).Format(
            false).Build();
        cluster.WaitActive();
        dfsClient = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()),
            conf);
        WaitForBlockReplication(testFile, dfsClient.GetNamenode(), numDataNodes, -1);
    }
    finally
    {
        // Always tear the cluster down, even when an assertion fails mid-test.
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Stores <paramref name="len"/> bytes of <paramref name="raw"/>, starting at
/// <paramref name="pos"/>, by writing them to the backing output stream.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
protected internal override void OnStoreStream(byte[] raw, int pos, int len)
{
    // Direct pass-through to the output stream field; no buffering here.
    @out.Write(raw, pos, len);
}
/// <summary>
/// Runnable entry point: writes the captured byte array through the shared
/// RandomAccessFile.
/// NOTE(review): no synchronization is visible here — presumably the caller
/// serializes access to raf; confirm before running concurrently.
/// </summary>
/// <exception cref="System.Exception"></exception>
public void Run()
{
    raf.Write(bytes);
}
/// <summary>
/// Exercises add/status/commit/reset on a sparse file larger than 2^32 bytes.
/// The index stores entry length truncated to 32 bits, so a >4G file shows
/// length 134217728 (4429185024 mod 2^32) and a >2G file can show a negative
/// int length (3429185024 - 2^32 = -865782272) — the assertions below pin
/// exactly that behavior.
/// </summary>
public virtual void TestAddHugeFile()
{
    Measure("Commencing test");
    // Create a sparse ~4.4 GB file (no data actually written; SetLength only).
    FilePath file = new FilePath(db.WorkTree, "a.txt");
    RandomAccessFile rf = new RandomAccessFile(file, "rw");
    rf.SetLength(4429185024L);
    rf.Close();
    Measure("Created file");
    Git git = new Git(db);
    git.Add().AddFilepattern("a.txt").Call();
    Measure("Added file");
    // Index length is the file length mod 2^32.
    NUnit.Framework.Assert.AreEqual("[a.txt, mode:100644, length:134217728, sha1:b8cfba97c2b962a44f080b3ca4e03b3204b6a350]"
        , IndexState(LENGTH | CONTENT_ID));
    Status status = git.Status().Call();
    Measure("Status after add");
    AssertCollectionEquals(Arrays.AsList("a.txt"), status.GetAdded());
    NUnit.Framework.Assert.AreEqual(0, status.GetChanged().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetConflicting().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetMissing().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetModified().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetRemoved().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetUntracked().Count);
    // Does not change anything, but modified timestamp
    rf = new RandomAccessFile(file, "rw");
    rf.Write(0);
    rf.Close();
    status = git.Status().Call();
    Measure("Status after non-modifying update");
    AssertCollectionEquals(Arrays.AsList("a.txt"), status.GetAdded());
    NUnit.Framework.Assert.AreEqual(0, status.GetChanged().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetConflicting().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetMissing().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetModified().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetRemoved().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetUntracked().Count);
    // Change something
    rf = new RandomAccessFile(file, "rw");
    rf.Write('a');
    rf.Close();
    status = git.Status().Call();
    Measure("Status after modifying update");
    AssertCollectionEquals(Arrays.AsList("a.txt"), status.GetAdded());
    NUnit.Framework.Assert.AreEqual(0, status.GetChanged().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetConflicting().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetMissing().Count);
    AssertCollectionEquals(Arrays.AsList("a.txt"), status.GetModified());
    NUnit.Framework.Assert.AreEqual(0, status.GetRemoved().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetUntracked().Count);
    // Truncate mod 4G and re-establish equality
    rf = new RandomAccessFile(file, "rw");
    rf.SetLength(134217728L);
    rf.Write(0);
    rf.Close();
    status = git.Status().Call();
    Measure("Status after truncating update");
    AssertCollectionEquals(Arrays.AsList("a.txt"), status.GetAdded());
    NUnit.Framework.Assert.AreEqual(0, status.GetChanged().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetConflicting().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetMissing().Count);
    AssertCollectionEquals(Arrays.AsList("a.txt"), status.GetModified());
    NUnit.Framework.Assert.AreEqual(0, status.GetRemoved().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetUntracked().Count);
    // Change something
    rf = new RandomAccessFile(file, "rw");
    rf.Write('a');
    rf.Close();
    status = git.Status().Call();
    Measure("Status after modifying and truncating update");
    AssertCollectionEquals(Arrays.AsList("a.txt"), status.GetAdded());
    NUnit.Framework.Assert.AreEqual(0, status.GetChanged().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetConflicting().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetMissing().Count);
    AssertCollectionEquals(Arrays.AsList("a.txt"), status.GetModified());
    NUnit.Framework.Assert.AreEqual(0, status.GetRemoved().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetUntracked().Count);
    // Truncate to entry length becomes negative int
    rf = new RandomAccessFile(file, "rw");
    rf.SetLength(3429185024L);
    rf.Write(0);
    rf.Close();
    git.Add().AddFilepattern("a.txt").Call();
    Measure("Added truncated file");
    // 3429185024 does not fit in a signed 32-bit int: stored as -865782272.
    NUnit.Framework.Assert.AreEqual("[a.txt, mode:100644, length:-865782272, sha1:59b3282f8f59f22d953df956ad3511bf2dc660fd]"
        , IndexState(LENGTH | CONTENT_ID));
    status = git.Status().Call();
    Measure("Status after status on truncated file");
    AssertCollectionEquals(Arrays.AsList("a.txt"), status.GetAdded());
    NUnit.Framework.Assert.AreEqual(0, status.GetChanged().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetConflicting().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetMissing().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetModified().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetRemoved().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetUntracked().Count);
    // Change something
    rf = new RandomAccessFile(file, "rw");
    rf.Write('a');
    rf.Close();
    status = git.Status().Call();
    Measure("Status after modifying and truncating update");
    AssertCollectionEquals(Arrays.AsList("a.txt"), status.GetAdded());
    NUnit.Framework.Assert.AreEqual(0, status.GetChanged().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetConflicting().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetMissing().Count);
    AssertCollectionEquals(Arrays.AsList("a.txt"), status.GetModified());
    NUnit.Framework.Assert.AreEqual(0, status.GetRemoved().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetUntracked().Count);
    git.Commit().SetMessage("make a commit").Call();
    Measure("After commit");
    status = git.Status().Call();
    Measure("After status after commit");
    NUnit.Framework.Assert.AreEqual(0, status.GetAdded().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetChanged().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetConflicting().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetMissing().Count);
    AssertCollectionEquals(Arrays.AsList("a.txt"), status.GetModified());
    NUnit.Framework.Assert.AreEqual(0, status.GetRemoved().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetUntracked().Count);
    git.Reset().SetMode(ResetCommand.ResetType.HARD).Call();
    Measure("After reset --hard");
    NUnit.Framework.Assert.AreEqual("[a.txt, mode:100644, length:-865782272, sha1:59b3282f8f59f22d953df956ad3511bf2dc660fd]"
        , IndexState(LENGTH | CONTENT_ID));
    status = git.Status().Call();
    Measure("Status after hard reset");
    NUnit.Framework.Assert.AreEqual(0, status.GetAdded().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetChanged().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetConflicting().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetMissing().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetModified().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetRemoved().Count);
    NUnit.Framework.Assert.AreEqual(0, status.GetUntracked().Count);
}