internal OpenFileCtxCache(NfsConfiguration config, long streamTimeout)
{
    // Insert and delete with openFileMap are synced
    maxStreams = config.GetInt(NfsConfigKeys.DfsNfsMaxOpenFilesKey,
        NfsConfigKeys.DfsNfsMaxOpenFilesDefault);
    Log.Info("Maximum open streams is " + maxStreams);
    this.streamTimeout = streamTimeout;
    streamMonitor = new OpenFileCtxCache.StreamMonitor(this);
}
public virtual void TestDeprecatedKeys()
{
    NfsConfiguration conf = new NfsConfiguration();

    conf.SetInt("nfs3.server.port", 998);
    NUnit.Framework.Assert.IsTrue(conf.GetInt(NfsConfigKeys.DfsNfsServerPortKey, 0) == 998);

    conf.SetInt("nfs3.mountd.port", 999);
    NUnit.Framework.Assert.IsTrue(conf.GetInt(NfsConfigKeys.DfsNfsMountdPortKey, 0) == 999);

    conf.Set("dfs.nfs.exports.allowed.hosts", "host1");
    NUnit.Framework.Assert.IsTrue(conf.Get(CommonConfigurationKeys.NfsExportsAllowedHostsKey).Equals("host1"));

    conf.SetInt("dfs.nfs.exports.cache.expirytime.millis", 1000);
    NUnit.Framework.Assert.IsTrue(conf.GetInt(Nfs3Constant.NfsExportsCacheExpirytimeMillisKey, 0) == 1000);

    conf.SetInt("hadoop.nfs.userupdate.milly", 10);
    NUnit.Framework.Assert.IsTrue(conf.GetInt(IdMappingConstant.UsergroupidUpdateMillisKey, 0) == 10);

    conf.Set("dfs.nfs3.dump.dir", "/nfs/tmp");
    NUnit.Framework.Assert.IsTrue(conf.Get(NfsConfigKeys.DfsNfsFileDumpDirKey).Equals("/nfs/tmp"));

    conf.SetBoolean("dfs.nfs3.enableDump", false);
    NUnit.Framework.Assert.IsTrue(conf.GetBoolean(NfsConfigKeys.DfsNfsFileDumpKey, true) == false);

    conf.SetInt("dfs.nfs3.max.open.files", 500);
    NUnit.Framework.Assert.IsTrue(conf.GetInt(NfsConfigKeys.DfsNfsMaxOpenFilesKey, 0) == 500);

    conf.SetInt("dfs.nfs3.stream.timeout", 6000);
    NUnit.Framework.Assert.IsTrue(conf.GetInt(NfsConfigKeys.DfsNfsStreamTimeoutKey, 0) == 6000);

    conf.Set("dfs.nfs3.export.point", "/dir1");
    NUnit.Framework.Assert.IsTrue(conf.Get(NfsConfigKeys.DfsNfsExportPointKey).Equals("/dir1"));
}
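// The deprecated-to-current key translations exercised above are presumably registered once
// on Hadoop's Configuration class (Configuration.AddDeprecations with DeprecationDelta entries,
// in the Sharpen naming), e.g. from a static initializer in NfsConfiguration. A minimal, hedged
// sketch of that registration; the exact entry list and its placement are assumptions, not the
// verified implementation.
static NfsConfiguration()
{
    Configuration.AddDeprecations(new Configuration.DeprecationDelta[]
    {
        new Configuration.DeprecationDelta("nfs3.server.port", NfsConfigKeys.DfsNfsServerPortKey),
        new Configuration.DeprecationDelta("nfs3.mountd.port", NfsConfigKeys.DfsNfsMountdPortKey),
        new Configuration.DeprecationDelta("dfs.nfs3.dump.dir", NfsConfigKeys.DfsNfsFileDumpDirKey),
        new Configuration.DeprecationDelta("dfs.nfs3.max.open.files", NfsConfigKeys.DfsNfsMaxOpenFilesKey),
        new Configuration.DeprecationDelta("dfs.nfs3.stream.timeout", NfsConfigKeys.DfsNfsStreamTimeoutKey),
        new Configuration.DeprecationDelta("dfs.nfs3.export.point", NfsConfigKeys.DfsNfsExportPointKey)
    });
}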
/// <exception cref="System.Exception"/>
public virtual void Init(DaemonContext context)
{
    System.Console.Error.WriteLine("Initializing privileged NFS client socket...");
    NfsConfiguration conf = new NfsConfiguration();
    int clientPort = conf.GetInt(NfsConfigKeys.DfsNfsRegistrationPortKey,
        NfsConfigKeys.DfsNfsRegistrationPortDefault);
    if (clientPort < 1 || clientPort > 1023)
    {
        throw new RuntimeException("Must start privileged NFS server with '"
            + NfsConfigKeys.DfsNfsRegistrationPortKey + "' configured to a privileged port.");
    }
    registrationSocket = new DatagramSocket(new IPEndPoint("localhost", clientPort));
    registrationSocket.SetReuseAddress(true);
    args = context.GetArguments();
}
/// <exception cref="System.IO.IOException"/>
public RpcProgramMountd(NfsConfiguration config, DatagramSocket registrationSocket,
    bool allowInsecurePorts)
    : base("mountd", "localhost", config.GetInt(NfsConfigKeys.DfsNfsMountdPortKey,
        NfsConfigKeys.DfsNfsMountdPortDefault), Program, Version1, Version3,
        registrationSocket, allowInsecurePorts)
{
    // Note that RPC cache is not enabled
    exports = new AList<string>();
    exports.AddItem(config.Get(NfsConfigKeys.DfsNfsExportPointKey,
        NfsConfigKeys.DfsNfsExportPointDefault));
    this.hostsMatcher = NfsExports.GetInstance(config);
    this.mounts = Sharpen.Collections.SynchronizedList(new AList<MountEntry>());
    UserGroupInformation.SetConfiguration(config);
    SecurityUtil.Login(config, NfsConfigKeys.DfsNfsKeytabFileKey,
        NfsConfigKeys.DfsNfsKerberosPrincipalKey);
    this.dfsClient = new DFSClient(NameNode.GetAddress(config), config);
}
internal WriteManager(IdMappingServiceProvider iug, NfsConfiguration config, bool aixCompatMode)
{
    this.iug = iug;
    this.config = config;
    this.aixCompatMode = aixCompatMode;
    streamTimeout = config.GetLong(NfsConfigKeys.DfsNfsStreamTimeoutKey,
        NfsConfigKeys.DfsNfsStreamTimeoutDefault);
    Log.Info("Stream timeout is " + streamTimeout + "ms.");
    if (streamTimeout < NfsConfigKeys.DfsNfsStreamTimeoutMinDefault)
    {
        Log.Info("Reset stream timeout to minimum value "
            + NfsConfigKeys.DfsNfsStreamTimeoutMinDefault + "ms.");
        streamTimeout = NfsConfigKeys.DfsNfsStreamTimeoutMinDefault;
    }
    maxStreams = config.GetInt(NfsConfigKeys.DfsNfsMaxOpenFilesKey,
        NfsConfigKeys.DfsNfsMaxOpenFilesDefault);
    Log.Info("Maximum open streams is " + maxStreams);
    this.fileContextCache = new OpenFileCtxCache(config, streamTimeout);
}
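// A hedged wiring sketch for the constructor above, showing how the two limits it reads could
// be tuned before a WriteManager is built. The particular values, the ShellBasedIdMapping
// helper, and the assumption that the caller sits in the same assembly (the constructor is
// internal) are illustrative only.
NfsConfiguration conf = new NfsConfiguration();
conf.SetLong(NfsConfigKeys.DfsNfsStreamTimeoutKey, 10 * 60 * 1000L); // ms; values below the minimum are raised to it
conf.SetInt(NfsConfigKeys.DfsNfsMaxOpenFilesKey, 256);               // cap on concurrently cached open streams
IdMappingServiceProvider iug = new ShellBasedIdMapping(conf);
WriteManager writeManager = new WriteManager(iug, conf, /* aixCompatMode */ false);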
/// <exception cref="System.Exception"/>
public static void Main(string[] args)
{
    Arrays.Fill(data1, unchecked((byte)7));
    Arrays.Fill(data2, unchecked((byte)8));
    Arrays.Fill(data3, unchecked((byte)9));
    // NFS3 Create request
    NfsConfiguration conf = new NfsConfiguration();
    TestOutOfOrderWrite.WriteClient client = new TestOutOfOrderWrite.WriteClient("localhost",
        conf.GetInt(NfsConfigKeys.DfsNfsServerPortKey, NfsConfigKeys.DfsNfsServerPortDefault),
        Create(), false);
    client.Run();
    while (handle == null)
    {
        Sharpen.Thread.Sleep(1000);
        System.Console.Out.WriteLine("handle is still null...");
    }
    Log.Info("Send write1 request");
    XDR writeReq;
    // Send the three writes in reverse offset order (2000, 1000, 0) to exercise out-of-order handling
    writeReq = Write(handle, unchecked((int)(0x8000005c)), 2000, 1000, data3);
    Nfs3Utils.WriteChannel(channel, writeReq, 1);
    writeReq = Write(handle, unchecked((int)(0x8000005d)), 1000, 1000, data2);
    Nfs3Utils.WriteChannel(channel, writeReq, 2);
    writeReq = Write(handle, unchecked((int)(0x8000005e)), 0, 1000, data1);
    Nfs3Utils.WriteChannel(channel, writeReq, 3);
}
/// <exception cref="System.IO.IOException"/>
internal virtual void HandleWrite(DFSClient dfsClient, WRITE3Request request,
    Org.Jboss.Netty.Channel.Channel channel, int xid, Nfs3FileAttributes preOpAttr)
{
    int count = request.GetCount();
    byte[] data = ((byte[])request.GetData().Array());
    if (data.Length < count)
    {
        WRITE3Response response = new WRITE3Response(Nfs3Status.Nfs3errInval);
        Nfs3Utils.WriteChannel(channel, response.Serialize(new XDR(), xid, new VerifierNone()), xid);
        return;
    }
    FileHandle handle = request.GetHandle();
    if (Log.IsDebugEnabled())
    {
        Log.Debug("handleWrite " + request);
    }
    // Check if there is a stream to write
    FileHandle fileHandle = request.GetHandle();
    OpenFileCtx openFileCtx = fileContextCache.Get(fileHandle);
    if (openFileCtx == null)
    {
        Log.Info("No opened stream for fileId: " + fileHandle.GetFileId());
        string fileIdPath = Nfs3Utils.GetFileIdPath(fileHandle.GetFileId());
        HdfsDataOutputStream fos = null;
        Nfs3FileAttributes latestAttr = null;
        try
        {
            int bufferSize = config.GetInt(CommonConfigurationKeysPublic.IoFileBufferSizeKey,
                CommonConfigurationKeysPublic.IoFileBufferSizeDefault);
            fos = dfsClient.Append(fileIdPath, bufferSize, EnumSet.Of(CreateFlag.Append), null, null);
            latestAttr = Nfs3Utils.GetFileAttr(dfsClient, fileIdPath, iug);
        }
        catch (RemoteException e)
        {
            IOException io = e.UnwrapRemoteException();
            if (io is AlreadyBeingCreatedException)
            {
                Log.Warn("Can't append file: " + fileIdPath
                    + ". Possibly the file is being closed. Drop the request: " + request
                    + ", wait for the client to retry...");
                return;
            }
            throw;
        }
        catch (IOException e)
        {
            Log.Error("Can't append to file: " + fileIdPath, e);
            if (fos != null)
            {
                fos.Close();
            }
            WccData fileWcc = new WccData(Nfs3Utils.GetWccAttr(preOpAttr), preOpAttr);
            WRITE3Response response = new WRITE3Response(Nfs3Status.Nfs3errIo, fileWcc, count,
                request.GetStableHow(), Nfs3Constant.WriteCommitVerf);
            Nfs3Utils.WriteChannel(channel, response.Serialize(new XDR(), xid, new VerifierNone()), xid);
            return;
        }
        // Add open stream
        string writeDumpDir = config.Get(NfsConfigKeys.DfsNfsFileDumpDirKey,
            NfsConfigKeys.DfsNfsFileDumpDirDefault);
        openFileCtx = new OpenFileCtx(fos, latestAttr, writeDumpDir + "/" + fileHandle.GetFileId(),
            dfsClient, iug, aixCompatMode, config);
        if (!AddOpenFileStream(fileHandle, openFileCtx))
        {
            Log.Info("Can't add new stream. Close it. Tell client to retry.");
            try
            {
                fos.Close();
            }
            catch (IOException e)
            {
                Log.Error("Can't close stream for fileId: " + handle.GetFileId(), e);
            }
            // Notify client to retry
            WccData fileWcc = new WccData(latestAttr.GetWccAttr(), latestAttr);
            WRITE3Response response = new WRITE3Response(Nfs3Status.Nfs3errJukebox, fileWcc, 0,
                request.GetStableHow(), Nfs3Constant.WriteCommitVerf);
            Nfs3Utils.WriteChannel(channel, response.Serialize(new XDR(), xid, new VerifierNone()), xid);
            return;
        }
        if (Log.IsDebugEnabled())
        {
            Log.Debug("Opened stream for appending file: " + fileHandle.GetFileId());
        }
    }
    // Add write into the async job queue
    openFileCtx.ReceivedNewWrite(dfsClient, request, channel, xid, asyncDataService, iug);
    return;
}
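// A hedged configuration sketch for the append path above: HandleWrite reads the append
// buffer size and the write-dump directory from the NfsConfiguration it was given, and each
// new OpenFileCtx dumps pending writes under a per-file path inside that directory. The
// values below are illustrative only, not verified defaults.
NfsConfiguration conf = new NfsConfiguration();
conf.SetInt(CommonConfigurationKeysPublic.IoFileBufferSizeKey, 64 * 1024); // buffer size passed to DFSClient.Append
conf.Set(NfsConfigKeys.DfsNfsFileDumpDirKey, "/tmp/.hdfs-nfs");            // base dir for per-file write dumps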
public virtual void TestOOOWrites()
{
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = null;
    RpcProgramNfs3 nfsd;
    int bufSize = 32;
    int numOOO = 3;
    SecurityHandler securityHandler = Org.Mockito.Mockito.Mock<SecurityHandler>();
    Org.Mockito.Mockito.When(securityHandler.GetUser()).ThenReturn(Runtime.GetProperty("user.name"));
    string currentUser = Runtime.GetProperty("user.name");
    config.Set(DefaultImpersonationProvider.GetTestProvider().GetProxySuperuserGroupConfKey(currentUser), "*");
    config.Set(DefaultImpersonationProvider.GetTestProvider().GetProxySuperuserIpConfKey(currentUser), "*");
    ProxyUsers.RefreshSuperUserGroupsConfiguration(config);
    // Use ephemeral ports in case tests are running in parallel
    config.SetInt("nfs3.mountd.port", 0);
    config.SetInt("nfs3.server.port", 0);
    try
    {
        cluster = new MiniDFSCluster.Builder(config).NumDataNodes(1).Build();
        cluster.WaitActive();
        Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3 nfs3 = new Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3(config);
        nfs3.StartServiceInternal(false);
        nfsd = (RpcProgramNfs3)nfs3.GetRpcProgram();
        DFSClient dfsClient = new DFSClient(NameNode.GetAddress(config), config);
        HdfsFileStatus status = dfsClient.GetFileInfo("/");
        FileHandle rootHandle = new FileHandle(status.GetFileId());
        CREATE3Request createReq = new CREATE3Request(rootHandle,
            "out-of-order-write" + Runtime.CurrentTimeMillis(),
            Nfs3Constant.CreateUnchecked, new SetAttr3(), 0);
        XDR createXdr = new XDR();
        createReq.Serialize(createXdr);
        CREATE3Response createRsp = nfsd.Create(createXdr.AsReadOnlyWrap(), securityHandler,
            new IPEndPoint("localhost", 1234));
        FileHandle handle = createRsp.GetObjHandle();
        byte[][] oooBuf = new byte[][] { new byte[bufSize], new byte[bufSize], new byte[bufSize] };
        for (int i = 0; i < numOOO; i++)
        {
            Arrays.Fill(oooBuf[i], unchecked((byte)i));
        }
        // Issue the writes in reverse offset order so they reach the server out of order
        for (int i_1 = 0; i_1 < numOOO; i_1++)
        {
            long offset = (numOOO - 1 - i_1) * bufSize;
            WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
                Nfs3Constant.WriteStableHow.Unstable, ByteBuffer.Wrap(oooBuf[i_1]));
            XDR writeXdr = new XDR();
            writeReq.Serialize(writeXdr);
            nfsd.Write(writeXdr.AsReadOnlyWrap(), null, 1, securityHandler,
                new IPEndPoint("localhost", 1234));
        }
        WaitWrite(nfsd, handle, 60000);
        // Read back the middle chunk and verify it matches the second buffer
        READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
        XDR readXdr = new XDR();
        readReq.Serialize(readXdr);
        READ3Response readRsp = nfsd.Read(readXdr.AsReadOnlyWrap(), securityHandler,
            new IPEndPoint("localhost", config.GetInt(NfsConfigKeys.DfsNfsServerPortKey,
                NfsConfigKeys.DfsNfsServerPortDefault)));
        NUnit.Framework.Assert.IsTrue(Arrays.Equals(oooBuf[1], ((byte[])readRsp.GetData().Array())));
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}