// TODO: maybe not efficient
/// <exception cref="System.IO.IOException"/>
public static WccData CreateWccData(WccAttr preOpAttr, DFSClient dfsClient,
    string fileIdPath, IdMappingServiceProvider iug)
{
    Nfs3FileAttributes postOpDirAttr = GetFileAttr(dfsClient, fileIdPath, iug);
    return new WccData(preOpAttr, postOpDirAttr);
}
/// <exception cref="System.IO.IOException"/>
internal static void AssertReports(int numDatanodes, HdfsConstants.DatanodeReportType type,
    DFSClient client, IList<DataNode> datanodes, string bpid)
{
    DatanodeInfo[] infos = client.DatanodeReport(type);
    NUnit.Framework.Assert.AreEqual(numDatanodes, infos.Length);
    DatanodeStorageReport[] reports = client.GetDatanodeStorageReport(type);
    NUnit.Framework.Assert.AreEqual(numDatanodes, reports.Length);
    for (int i = 0; i < infos.Length; i++)
    {
        NUnit.Framework.Assert.AreEqual(infos[i], reports[i].GetDatanodeInfo());
        DataNode d = FindDatanode(infos[i].GetDatanodeUuid(), datanodes);
        if (bpid != null)
        {
            // check storage
            StorageReport[] computed = reports[i].GetStorageReports();
            Arrays.Sort(computed, Cmp);
            StorageReport[] expected = d.GetFSDataset().GetStorageReports(bpid);
            Arrays.Sort(expected, Cmp);
            NUnit.Framework.Assert.AreEqual(expected.Length, computed.Length);
            for (int j = 0; j < expected.Length; j++)
            {
                NUnit.Framework.Assert.AreEqual(expected[j].GetStorage().GetStorageID(),
                    computed[j].GetStorage().GetStorageID());
            }
        }
    }
}
/* fsid */
/// <exception cref="System.IO.IOException"/>
public static Nfs3FileAttributes GetFileAttr(DFSClient client, string fileIdPath,
    IdMappingServiceProvider iug)
{
    HdfsFileStatus fs = GetFileStatus(client, fileIdPath);
    return fs == null ? null : GetNfs3FileAttrFromFileStatus(fs, iug);
}
public virtual void TestCheckCommitAixCompatMode()
{
    DFSClient dfsClient = Org.Mockito.Mockito.Mock<DFSClient>();
    Nfs3FileAttributes attr = new Nfs3FileAttributes();
    HdfsDataOutputStream fos = Org.Mockito.Mockito.Mock<HdfsDataOutputStream>();
    NfsConfiguration conf = new NfsConfiguration();
    conf.SetBoolean(NfsConfigKeys.LargeFileUpload, false);
    // Enable AIX compatibility mode.
    OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
        new ShellBasedIdMapping(new NfsConfiguration()), true, conf);
    // Test fall-through to pendingWrites check in the event that commitOffset
    // is greater than the number of bytes we've so far flushed.
    Org.Mockito.Mockito.When(fos.GetPos()).ThenReturn((long)2);
    OpenFileCtx.COMMIT_STATUS status = ctx.CheckCommitInternal(5, null, 1, attr, false);
    NUnit.Framework.Assert.IsTrue(status == OpenFileCtx.COMMIT_STATUS.CommitFinished);
    // Test the case when we actually have received more bytes than we're trying
    // to commit.
    ctx.GetPendingWritesForTest()[new OffsetRange(0, 10)] =
        new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null);
    Org.Mockito.Mockito.When(fos.GetPos()).ThenReturn((long)10);
    ctx.SetNextOffsetForTest((long)10);
    status = ctx.CheckCommitInternal(5, null, 1, attr, false);
    NUnit.Framework.Assert.IsTrue(status == OpenFileCtx.COMMIT_STATUS.CommitDoSync);
}
public virtual void TestSmallAppendRace()
{
    Path file = new Path("/testSmallAppendRace");
    string fName = file.ToUri().GetPath();
    // Create the file and write a small amount of data.
    FSDataOutputStream stm = fs.Create(file);
    AppendTestUtil.Write(stm, 0, 123);
    stm.Close();
    // Introduce a delay between getFileInfo and calling append() against NN.
    DFSClient client = DFSClientAdapter.GetDFSClient(fs);
    DFSClient spyClient = Org.Mockito.Mockito.Spy(client);
    Org.Mockito.Mockito.When(spyClient.GetFileInfo(fName)).ThenAnswer(new _Answer_548(client, fName));
    DFSClientAdapter.SetDFSClient(fs, spyClient);
    // Create two threads for doing appends to the same file.
    Sharpen.Thread worker1 = new _Thread_564(this, file);
    Sharpen.Thread worker2 = new _Thread_574(this, file);
    worker1.Start();
    worker2.Start();
    // append will fail when the file size crosses the checksum chunk boundary,
    // if append was called with a stale file stat.
    DoSmallAppends(file, fs, 20);
}
public virtual void TestLinkTargetNonSymlink()
{
    FileContext fc = null;
    Path notSymlink = new Path("/notasymlink");
    try
    {
        fc = FileContext.GetFileContext(cluster.GetFileSystem().GetUri());
        fc.Create(notSymlink, EnumSet.Of(CreateFlag.Create));
        DFSClient client = new DFSClient(cluster.GetFileSystem().GetUri(),
            cluster.GetConfiguration(0));
        try
        {
            client.GetLinkTarget(notSymlink.ToString());
            NUnit.Framework.Assert.Fail("Expected exception for resolving non-symlink");
        }
        catch (IOException e)
        {
            GenericTestUtils.AssertExceptionContains("is not a symbolic link", e);
        }
    }
    finally
    {
        if (fc != null)
        {
            fc.Delete(notSymlink, false);
        }
    }
}
public virtual void TestCheckSequential()
{
    DFSClient dfsClient = Org.Mockito.Mockito.Mock<DFSClient>();
    Nfs3FileAttributes attr = new Nfs3FileAttributes();
    HdfsDataOutputStream fos = Org.Mockito.Mockito.Mock<HdfsDataOutputStream>();
    Org.Mockito.Mockito.When(fos.GetPos()).ThenReturn((long)0);
    NfsConfiguration config = new NfsConfiguration();
    config.SetBoolean(NfsConfigKeys.LargeFileUpload, false);
    OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
        new ShellBasedIdMapping(config), false, config);
    ctx.GetPendingWritesForTest()[new OffsetRange(5, 10)] =
        new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null);
    ctx.GetPendingWritesForTest()[new OffsetRange(10, 15)] =
        new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null);
    ctx.GetPendingWritesForTest()[new OffsetRange(20, 25)] =
        new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null);
    NUnit.Framework.Assert.IsTrue(!ctx.CheckSequential(5, 4));
    NUnit.Framework.Assert.IsTrue(ctx.CheckSequential(9, 5));
    NUnit.Framework.Assert.IsTrue(ctx.CheckSequential(10, 5));
    NUnit.Framework.Assert.IsTrue(ctx.CheckSequential(14, 5));
    NUnit.Framework.Assert.IsTrue(!ctx.CheckSequential(15, 5));
    NUnit.Framework.Assert.IsTrue(!ctx.CheckSequential(20, 5));
    NUnit.Framework.Assert.IsTrue(!ctx.CheckSequential(25, 5));
    NUnit.Framework.Assert.IsTrue(!ctx.CheckSequential(999, 5));
}
/// <exception cref="Javax.Servlet.ServletException"/>
/// <exception cref="System.IO.IOException"/>
protected override void DoGet(HttpServletRequest request, HttpServletResponse response)
{
    PrintWriter @out = response.GetWriter();
    string path = ServletUtil.GetDecodedPath(request, "/getFileChecksum");
    XMLOutputter xml = new XMLOutputter(@out, "UTF-8");
    xml.Declaration();
    ServletContext context = GetServletContext();
    DataNode datanode = (DataNode)context.GetAttribute("datanode");
    Configuration conf = new HdfsConfiguration(datanode.GetConf());
    try
    {
        DFSClient dfs = DatanodeJspHelper.GetDFSClient(request, datanode, conf,
            GetUGI(request, conf));
        MD5MD5CRC32FileChecksum checksum = dfs.GetFileChecksum(path, long.MaxValue);
        MD5MD5CRC32FileChecksum.Write(xml, checksum);
    }
    catch (IOException ioe)
    {
        WriteXml(ioe, path, xml);
    }
    catch (Exception e)
    {
        WriteXml(e, path, xml);
    }
    xml.EndDocument();
}
/// <exception cref="System.IO.IOException"/>
private void Renew()
{
    IList<DFSClient> copies;
    lock (this)
    {
        copies = new AList<DFSClient>(dfsclients);
    }
    // Sort the client names for finding out repeated names.
    copies.Sort(new _IComparer_412());
    string previousName = string.Empty;
    for (int i = 0; i < copies.Count; i++)
    {
        DFSClient c = copies[i];
        // Skip if the current client name is the same as the previous name.
        if (!c.GetClientName().Equals(previousName))
        {
            if (!c.RenewLease())
            {
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("Did not renew lease for client " + c);
                }
                continue;
            }
            previousName = c.GetClientName();
            if (Log.IsDebugEnabled())
            {
                Log.Debug("Lease renewed for client " + previousName);
            }
        }
    }
}
public virtual void EnsureInvalidBlockTokensAreRejected()
{
    cluster.TransitionToActive(0);
    FileSystem fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
    DFSTestUtil.WriteFile(fs, TestPath, TestData);
    NUnit.Framework.Assert.AreEqual(TestData, DFSTestUtil.ReadFile(fs, TestPath));
    DFSClient dfsClient = DFSClientAdapter.GetDFSClient((DistributedFileSystem)fs);
    DFSClient spyDfsClient = Org.Mockito.Mockito.Spy(dfsClient);
    // This will make the token invalid, since the password won't match anymore.
    Org.Mockito.Mockito.DoAnswer(new _Answer_121()).When(spyDfsClient).GetLocatedBlocks(
        Org.Mockito.Mockito.AnyString(), Org.Mockito.Mockito.AnyLong(), Org.Mockito.Mockito.AnyLong());
    DFSClientAdapter.SetDFSClient((DistributedFileSystem)fs, spyDfsClient);
    try
    {
        NUnit.Framework.Assert.AreEqual(TestData, DFSTestUtil.ReadFile(fs, TestPath));
        NUnit.Framework.Assert.Fail("Shouldn't have been able to read a file with invalid block tokens");
    }
    catch (IOException ioe)
    {
        GenericTestUtils.AssertExceptionContains("Could not obtain block", ioe);
    }
}
/// <summary>Add a client.</summary>
private void AddClient(DFSClient dfsc)
{
    lock (this)
    {
        foreach (DFSClient c in dfsclients)
        {
            if (c == dfsc)
            {
                // Client already exists, nothing to do.
                return;
            }
        }
        // Client not found, add it.
        dfsclients.AddItem(dfsc);
        // Update renewal time.
        if (dfsc.GetHdfsTimeout() > 0)
        {
            long half = dfsc.GetHdfsTimeout() / 2;
            if (half < renewal)
            {
                this.renewal = half;
            }
        }
    }
}
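// Hedged illustration (not part of the original source; the class name and the numeric
// values below are assumed): AddClient keeps the renewal interval at half of the smallest
// GetHdfsTimeout() among the registered clients, so a lease is always renewed well before
// it can expire. A minimal standalone sketch of that arithmetic:
using System;

internal static class RenewalIntervalSketch
{
    internal static void Main()
    {
        long renewal = 30000;             // assumed current renewal interval, in ms
        long hdfsTimeout = 40000;         // assumed GetHdfsTimeout() of a newly added client, in ms
        if (hdfsTimeout > 0)
        {
            long half = hdfsTimeout / 2;  // 20000 ms
            if (half < renewal)
            {
                renewal = half;           // the renewer now wakes up every 20000 ms
            }
        }
        Console.WriteLine(renewal);       // prints 20000
    }
}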
/// <exception cref="System.Exception"/>
public override FSDataInputStream Load(DFSClientCache.DFSInputStreamCaheKey key)
{
    DFSClient client = this._enclosing.GetDfsClient(key.userId);
    DFSInputStream dis = client.Open(key.inodePath);
    return client.CreateWrappedInputStream(dis);
}
private RemoteBlockReader(string file, string bpid, long blockId, DataInputStream @in,
    DataChecksum checksum, bool verifyChecksum, long startOffset, long firstChunkOffset,
    long bytesToRead, Peer peer, DatanodeID datanodeID, PeerCache peerCache)
    : base(new Path("/" + Block.BlockFilePrefix + blockId + ":" + bpid + ":of:" + file),
        1, verifyChecksum, checksum.GetChecksumSize() > 0 ? checksum : null,
        checksum.GetBytesPerChecksum(), checksum.GetChecksumSize())
{
    // Path is used only for printing block and file information in debug
    /* too non path-like? */
    this.isLocal = DFSClient.IsLocalAddress(NetUtils.CreateSocketAddr(datanodeID.GetXferAddr()));
    this.peer = peer;
    this.datanodeID = datanodeID;
    this.@in = @in;
    this.checksum = checksum;
    this.startOffset = Math.Max(startOffset, 0);
    this.blockId = blockId;
    // The total number of bytes that we need to transfer from the DN is
    // the amount that the user wants (bytesToRead), plus the padding at
    // the beginning in order to chunk-align. Note that the DN may elect
    // to send more than this amount if the read starts/ends mid-chunk.
    this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
    this.firstChunkOffset = firstChunkOffset;
    lastChunkOffset = firstChunkOffset;
    lastChunkLen = -1;
    bytesPerChecksum = this.checksum.GetBytesPerChecksum();
    checksumSize = this.checksum.GetChecksumSize();
    this.peerCache = peerCache;
}
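// Hedged worked example (not from the original source; the class name and the offsets
// below are assumed for illustration): the constructor above adds the padding between the
// enclosing checksum-chunk boundary and the requested start offset, so the transfer from
// the datanode stays chunk-aligned.
using System;

internal static class ChunkAlignmentSketch
{
    internal static void Main()
    {
        long bytesToRead = 1000;       // assumed number of bytes the caller asked for
        long startOffset = 700;        // assumed requested start offset within the block
        long firstChunkOffset = 512;   // assumed start of the checksum chunk containing startOffset
        // Same arithmetic as bytesNeededToFinish above: the 188 padding bytes in front of
        // the requested range must also be read from the datanode.
        long bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
        Console.WriteLine(bytesNeededToFinish);  // prints 1188
    }
}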
// expected because the old active will be unable to flush the
// end-of-segment op since it is fenced
/// <exception cref="System.IO.IOException"/>
/// <exception cref="System.Exception"/>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Inotify.MissingEventsException"/>
public virtual void TestReadEventsWithTimeout()
{
    Configuration conf = new HdfsConfiguration();
    MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).Build();
    try
    {
        cluster.GetDfsCluster().WaitActive();
        cluster.GetDfsCluster().TransitionToActive(0);
        DFSClient client = new DFSClient(
            cluster.GetDfsCluster().GetNameNode(0).GetNameNodeAddress(), conf);
        DFSInotifyEventInputStream eis = client.GetInotifyEventStream();
        ScheduledExecutorService ex = Executors.NewSingleThreadScheduledExecutor();
        ex.Schedule(new _Runnable_463(client), 1, TimeUnit.Seconds); // test will fail
        // a very generous wait period -- the edit will definitely have been
        // processed by the time this is up
        EventBatch batch = eis.Poll(5, TimeUnit.Seconds);
        NUnit.Framework.Assert.IsNotNull(batch);
        NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
        NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType.Create);
        NUnit.Framework.Assert.AreEqual("/dir", ((Event.CreateEvent)batch.GetEvents()[0]).GetPath());
    }
    finally
    {
        cluster.Shutdown();
    }
}
internal HdfsWriter(DFSClient client, OutputStream @out, DefaultHttpResponse response)
{
    this.client = client;
    this.@out = @out;
    this.response = response;
}
public virtual void TestManyDfsClientsWhereSomeNotOpen()
{
    // First DFSClient has no files open so doesn't renew leases.
    DFSClient mockClient1 = CreateMockClient();
    Org.Mockito.Mockito.DoReturn(false).When(mockClient1).RenewLease();
    NUnit.Framework.Assert.AreSame(renewer,
        LeaseRenewer.GetInstance(FakeAuthority, FakeUgiA, mockClient1));
    // Set up a file so that we start renewing our lease.
    DFSOutputStream mockStream1 = Org.Mockito.Mockito.Mock<DFSOutputStream>();
    long fileId = 456L;
    renewer.Put(fileId, mockStream1, mockClient1);
    // Second DFSClient does renew lease.
    DFSClient mockClient2 = CreateMockClient();
    Org.Mockito.Mockito.DoReturn(true).When(mockClient2).RenewLease();
    NUnit.Framework.Assert.AreSame(renewer,
        LeaseRenewer.GetInstance(FakeAuthority, FakeUgiA, mockClient2));
    // Set up a file so that we start renewing our lease.
    DFSOutputStream mockStream2 = Org.Mockito.Mockito.Mock<DFSOutputStream>();
    renewer.Put(fileId, mockStream2, mockClient2);
    // Wait for lease to get renewed.
    GenericTestUtils.WaitFor(new _Supplier_156(mockClient1, mockClient2), 100, 10000);
    // These should not throw!
    renewer.CloseFile(fileId, mockClient1);
    renewer.CloseFile(fileId, mockClient2);
}
/// <param name="blockSize"/>
/// <param name="perVolumeCapacity">
/// limit the capacity of each volume to the given
/// value. If negative, then don't limit.
/// </param>
/// <exception cref="System.IO.IOException"/>
private void StartCluster(int blockSize, int numDatanodes, long perVolumeCapacity)
{
    InitConfig(blockSize);
    cluster = new MiniDFSCluster.Builder(conf)
        .StoragesPerDatanode(StoragesPerDatanode)
        .NumDataNodes(numDatanodes)
        .Build();
    fs = cluster.GetFileSystem();
    client = fs.GetClient();
    cluster.WaitActive();
    if (perVolumeCapacity >= 0)
    {
        foreach (DataNode dn in cluster.GetDataNodes())
        {
            foreach (FsVolumeSpi volume in dn.GetFSDataset().GetVolumes())
            {
                ((FsVolumeImpl)volume).SetCapacityForTesting(perVolumeCapacity);
            }
        }
    }
    if (numDatanodes == 1)
    {
        IList<FsVolumeSpi> volumes = cluster.GetDataNodes()[0].GetFSDataset().GetVolumes();
        Assert.AssertThat(volumes.Count, IS.Is(1));
        singletonVolume = ((FsVolumeImpl)volumes[0]);
    }
}
public virtual void TestHedgedReadLoopTooManyTimes()
{
    Configuration conf = new Configuration();
    int numHedgedReadPoolThreads = 5;
    int hedgedReadTimeoutMillis = 50;
    conf.SetInt(DFSConfigKeys.DfsDfsclientHedgedReadThreadpoolSize, numHedgedReadPoolThreads);
    conf.SetLong(DFSConfigKeys.DfsDfsclientHedgedReadThresholdMillis, hedgedReadTimeoutMillis);
    conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 0);
    // Set up the InjectionHandler.
    DFSClientFaultInjector.instance = Org.Mockito.Mockito.Mock<DFSClientFaultInjector>();
    DFSClientFaultInjector injector = DFSClientFaultInjector.instance;
    int sleepMs = 100;
    Org.Mockito.Mockito.DoAnswer(new _Answer_296(hedgedReadTimeoutMillis, sleepMs))
        .When(injector).FetchFromDatanodeException();
    Org.Mockito.Mockito.DoAnswer(new _Answer_309(sleepMs))
        .When(injector).ReadFromDatanodeDelay();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Format(true).Build();
    DistributedFileSystem fileSys = cluster.GetFileSystem();
    DFSClient dfsClient = fileSys.GetClient();
    FSDataOutputStream output = null;
    DFSInputStream input = null;
    string filename = "/hedgedReadMaxOut.dat";
    try
    {
        Path file = new Path(filename);
        output = fileSys.Create(file, (short)2);
        byte[] data = new byte[64 * 1024];
        output.Write(data);
        output.Flush();
        output.Write(data);
        output.Flush();
        output.Write(data);
        output.Flush();
        output.Close();
        byte[] buffer = new byte[64 * 1024];
        input = dfsClient.Open(filename);
        input.Read(0, buffer, 0, 1024);
        input.Close();
        NUnit.Framework.Assert.AreEqual(3, input.GetHedgedReadOpsLoopNumForTesting());
    }
    catch (BlockMissingException)
    {
        NUnit.Framework.Assert.IsTrue(false);
    }
    finally
    {
        Org.Mockito.Mockito.Reset(injector);
        IOUtils.Cleanup(null, input);
        IOUtils.Cleanup(null, output);
        fileSys.Close();
        cluster.Shutdown();
    }
}
/// <summary>Close a file.</summary>
internal virtual void CloseFile(long inodeId, DFSClient dfsc)
{
    dfsc.RemoveFileBeingWritten(inodeId);
    lock (this)
    {
        if (dfsc.IsFilesBeingWrittenEmpty())
        {
            dfsclients.Remove(dfsc);
        }
        // Update emptyTime if necessary.
        if (emptyTime == long.MaxValue)
        {
            foreach (DFSClient c in dfsclients)
            {
                if (!c.IsFilesBeingWrittenEmpty())
                {
                    // Found a non-empty file-being-written map.
                    return;
                }
            }
            // Discover the first time that all file-being-written maps are empty.
            emptyTime = Time.MonotonicNow();
        }
    }
}
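// Hedged sketch (not part of the original source; the class name, helper, and grace period
// below are hypothetical): emptyTime records the first moment at which no registered client
// has files being written. A renewer can then compare it against a grace period to decide
// when it is safe to stop renewing leases.
using System;

internal static class RenewerExpirySketch
{
    // Returns true if the renewer has been idle for longer than the grace period.
    internal static bool IsExpired(long emptyTime, long nowMonotonicMs, long gracePeriodMs)
    {
        return emptyTime != long.MaxValue && nowMonotonicMs - emptyTime > gracePeriodMs;
    }

    internal static void Main()
    {
        long emptyTime = 100000;   // assumed monotonic time (ms) when the renewer became idle
        long now = 200000;         // assumed current monotonic time (ms)
        Console.WriteLine(IsExpired(emptyTime, now, 60000));  // True: idle 100 s > 60 s grace
    }
}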
/// <summary>Close all DFSClient instances in the Cache.</summary>
/// <param name="onlyAutomatic">only close those that are marked for automatic closing</param>
/// <exception cref="System.IO.IOException"/>
internal virtual void CloseAll(bool onlyAutomatic)
{
    lock (this)
    {
        IList<IOException> exceptions = new AList<IOException>();
        ConcurrentMap<string, DFSClient> map = clientCache.AsMap();
        foreach (KeyValuePair<string, DFSClient> item in map)
        {
            DFSClient client = item.Value;
            if (client != null)
            {
                try
                {
                    client.Close();
                }
                catch (IOException ioe)
                {
                    exceptions.AddItem(ioe);
                }
            }
        }
        if (!exceptions.IsEmpty())
        {
            throw MultipleIOException.CreateIOException(exceptions);
        }
    }
}
/// <exception cref="System.IO.IOException"/>
private void OnGetFileChecksum(ChannelHandlerContext ctx)
{
    MD5MD5CRC32FileChecksum checksum = null;
    string nnId = @params.NamenodeId();
    DFSClient dfsclient = NewDfsClient(nnId, conf);
    try
    {
        checksum = dfsclient.GetFileChecksum(path, long.MaxValue);
        dfsclient.Close();
        dfsclient = null;
    }
    finally
    {
        IOUtils.Cleanup(Log, dfsclient);
    }
    byte[] js = Sharpen.Runtime.GetBytesForString(JsonUtil.ToJsonString(checksum), Charsets.Utf8);
    DefaultFullHttpResponse resp = new DefaultFullHttpResponse(HttpVersion.Http11,
        HttpResponseStatus.Ok, Unpooled.WrappedBuffer(js));
    resp.Headers().Set(HttpHeaders.Names.ContentType, ApplicationJsonUtf8);
    resp.Headers().Set(HttpHeaders.Names.ContentLength, js.Length);
    resp.Headers().Set(HttpHeaders.Names.Connection, HttpHeaders.Values.Close);
    ctx.WriteAndFlush(resp).AddListener(ChannelFutureListener.Close);
}
public virtual void TestAbandonBlock()
{
    string src = FileNamePrefix + "foo";
    // Start writing a file but do not close it.
    FSDataOutputStream fout = fs.Create(new Path(src), true, 4096, (short)1, 512L);
    for (int i = 0; i < 1024; i++)
    {
        fout.Write(123);
    }
    fout.Hflush();
    long fileId = ((DFSOutputStream)fout.GetWrappedStream()).GetFileId();
    // Now abandon the last block.
    DFSClient dfsclient = DFSClientAdapter.GetDFSClient(fs);
    LocatedBlocks blocks = dfsclient.GetNamenode().GetBlockLocations(src, 0, int.MaxValue);
    int originalNumBlocks = blocks.LocatedBlockCount();
    LocatedBlock b = blocks.GetLastLocatedBlock();
    dfsclient.GetNamenode().AbandonBlock(b.GetBlock(), fileId, src, dfsclient.clientName);
    // Call abandonBlock again to make sure the operation is idempotent.
    dfsclient.GetNamenode().AbandonBlock(b.GetBlock(), fileId, src, dfsclient.clientName);
    // And close the file.
    fout.Close();
    // Restart the NameNode and check that the block has been abandoned after the restart.
    cluster.RestartNameNode();
    blocks = dfsclient.GetNamenode().GetBlockLocations(src, 0, int.MaxValue);
    NUnit.Framework.Assert.AreEqual("Block " + b + " has not been abandoned.",
        originalNumBlocks, blocks.LocatedBlockCount() + 1);
}
// Do a possible commit before a read request in case there is buffered data
// inside DFSClient which has been flushed but not synced.
internal virtual int CommitBeforeRead(DFSClient dfsClient, FileHandle fileHandle, long commitOffset)
{
    int status;
    OpenFileCtx openFileCtx = fileContextCache.Get(fileHandle);
    if (openFileCtx == null)
    {
        if (Log.IsDebugEnabled())
        {
            Log.Debug("No opened stream for fileId: " + fileHandle.GetFileId()
                + " commitOffset=" + commitOffset + ". Return success in this case.");
        }
        status = Nfs3Status.Nfs3Ok;
    }
    else
    {
        // A commit request triggered by a read won't create a pending commit object.
        OpenFileCtx.COMMIT_STATUS ret = openFileCtx.CheckCommit(dfsClient, commitOffset,
            null, 0, null, true);
        switch (ret)
        {
            case OpenFileCtx.COMMIT_STATUS.CommitFinished:
            case OpenFileCtx.COMMIT_STATUS.CommitInactiveCtx:
            {
                status = Nfs3Status.Nfs3Ok;
                break;
            }
            case OpenFileCtx.COMMIT_STATUS.CommitInactiveWithPendingWrite:
            case OpenFileCtx.COMMIT_STATUS.CommitError:
            {
                status = Nfs3Status.Nfs3errIo;
                break;
            }
            case OpenFileCtx.COMMIT_STATUS.CommitWait:
            case OpenFileCtx.COMMIT_STATUS.CommitSpecialWait:
            {
                status = Nfs3Status.Nfs3errJukebox;
                break;
            }
            case OpenFileCtx.COMMIT_STATUS.CommitSpecialSuccess:
            {
                // A read beyond EOF could result in a partial read.
                status = Nfs3Status.Nfs3Ok;
                break;
            }
            default:
            {
                Log.Error("Should not get commit return code: " + ret.ToString());
                throw new RuntimeException("Should not get commit return code: " + ret.ToString());
            }
        }
    }
    return status;
}
public virtual void TestClientThatDoesNotSupportEncryption()
{
    MiniDFSCluster cluster = null;
    try
    {
        Configuration conf = new Configuration();
        // Set short retry timeouts so this test runs faster.
        conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
        cluster = new MiniDFSCluster.Builder(conf).Build();
        FileSystem fs = GetFileSystem(conf);
        WriteTestDataToFile(fs);
        NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
        fs.Close();
        cluster.Shutdown();
        SetEncryptionConfigKeys(conf);
        cluster = new MiniDFSCluster.Builder(conf)
            .ManageDataDfsDirs(false)
            .ManageNameDfsDirs(false)
            .Format(false)
            .StartupOption(HdfsServerConstants.StartupOption.Regular)
            .Build();
        fs = GetFileSystem(conf);
        DFSClient client = DFSClientAdapter.GetDFSClient((DistributedFileSystem)fs);
        DFSClient spyClient = Org.Mockito.Mockito.Spy(client);
        Org.Mockito.Mockito.DoReturn(false).When(spyClient).ShouldEncryptData();
        DFSClientAdapter.SetDFSClient((DistributedFileSystem)fs, spyClient);
        GenericTestUtils.LogCapturer logs =
            GenericTestUtils.LogCapturer.CaptureLogs(LogFactory.GetLog(typeof(DataNode)));
        try
        {
            NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
            if (resolverClazz != null && !resolverClazz.EndsWith("TestTrustedChannelResolver"))
            {
                NUnit.Framework.Assert.Fail("Should not have been able to read without encryption enabled.");
            }
        }
        catch (IOException ioe)
        {
            GenericTestUtils.AssertExceptionContains("Could not obtain block:", ioe);
        }
        finally
        {
            logs.StopCapturing();
        }
        fs.Close();
        if (resolverClazz == null)
        {
            GenericTestUtils.AssertMatches(logs.GetOutput(),
                "Failed to read expected encryption handshake from client at");
        }
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <exception cref="System.Exception"/>
public static void SetupCluster(int replicationFactor, HdfsConfiguration conf)
{
    util = new BlockReaderTestUtil(replicationFactor, conf);
    dfsClient = util.GetDFSClient();
    long seed = Time.Now();
    Log.Info("Random seed: " + seed);
    rand = new Random(seed);
}
/// <summary>Get the list of Blocks for a file.</summary>
/// <exception cref="System.IO.IOException"/>
public virtual IList<LocatedBlock> GetFileBlocks(Path filepath, int sizeKB)
{
    // Return the blocks we just wrote.
    DFSClient dfsclient = GetDFSClient();
    return dfsclient.GetNamenode()
        .GetBlockLocations(filepath.ToString(), 0, sizeKB * 1024)
        .GetLocatedBlocks();
}
public virtual void StartUpCluster()
{
    conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDatanodes).Build();
    fs = cluster.GetFileSystem();
    client = fs.GetClient();
    bpid = cluster.GetNamesystem().GetBlockPoolId();
}
/// <summary>
/// Test case that stops a writer after finalizing a block but
/// before calling completeFile, recovers a file from another writer,
/// starts writing from that writer, and then has the old lease holder
/// call completeFile.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestCompleteOtherLeaseHoldersFile()
{
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(5).Build();
    try
    {
        cluster.WaitActive();
        NamenodeProtocols preSpyNN = cluster.GetNameNodeRpc();
        NamenodeProtocols spyNN = Org.Mockito.Mockito.Spy(preSpyNN);
        // Delay completeFile.
        GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(Log);
        Org.Mockito.Mockito.DoAnswer(delayer).When(spyNN).Complete(Matchers.AnyString(),
            Matchers.AnyString(), (ExtendedBlock)Matchers.AnyObject(), Matchers.AnyLong());
        DFSClient client = new DFSClient(null, spyNN, conf, null);
        file1 = new Path("/testCompleteOtherLease");
        OutputStream stm = client.Create("/testCompleteOtherLease", true);
        // Write 1/2 block.
        AppendTestUtil.Write(stm, 0, 4096);
        AtomicReference<Exception> err = new AtomicReference<Exception>();
        Sharpen.Thread t = new _Thread_242(stm, err);
        t.Start();
        Log.Info("Waiting for close to get to latch...");
        delayer.WaitForCall();
        // At this point, the block is finalized on the DNs, but the file
        // has not been completed in the NN.
        // Lose the leases.
        Log.Info("Killing lease checker");
        client.GetLeaseRenewer().InterruptAndJoin();
        FileSystem fs1 = cluster.GetFileSystem();
        FileSystem fs2 = AppendTestUtil.CreateHdfsWithDifferentUsername(fs1.GetConf());
        Log.Info("Recovering file");
        RecoverFile(fs2);
        Log.Info("Opening file for append from new fs");
        FSDataOutputStream appenderStream = fs2.Append(file1);
        Log.Info("Writing some data from new appender");
        AppendTestUtil.Write(appenderStream, 0, 4096);
        Log.Info("Telling old close to proceed.");
        delayer.Proceed();
        Log.Info("Waiting for close to finish.");
        t.Join();
        Log.Info("Close finished.");
        // We expect that close will get a "Lease mismatch" error.
        Exception thrownByClose = err.Get();
        NUnit.Framework.Assert.IsNotNull(thrownByClose);
        NUnit.Framework.Assert.IsTrue(thrownByClose is IOException);
        if (!thrownByClose.Message.Contains("Lease mismatch"))
        {
            throw thrownByClose;
        }
        // The appender should be able to close properly.
        appenderStream.Close();
    }
    finally
    {
        cluster.Shutdown();
    }
}
private DFSClient CreateMockClient()
{
    DFSClient mock = Org.Mockito.Mockito.Mock<DFSClient>();
    Org.Mockito.Mockito.DoReturn(true).When(mock).IsClientRunning();
    Org.Mockito.Mockito.DoReturn((int)FastGracePeriod).When(mock).GetHdfsTimeout();
    Org.Mockito.Mockito.DoReturn("myclient").When(mock).GetClientName();
    return mock;
}
/// <exception cref="System.IO.IOException"/>
internal Writer(DFSClient client, int blockSize)
{
    localClient = client;
    keepRunning = true;
    filesCreated = 0;
    numFailures = 0;
    // At least some of the files should span a block boundary.
    data = new byte[blockSize * 2];
}