// Do Nothing
/// <summary>
/// Verifies WebHDFS proxy-user (doAs) behavior: after swapping in the proxy UGI,
/// the home directory, file creation, and append must all be attributed to the
/// proxy user rather than the real user.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestWebHdfsDoAs()
{
	WebHdfsTestUtil.Log.Info("START: testWebHdfsDoAs()");
	WebHdfsTestUtil.Log.Info("ugi.getShortUserName()=" + ugi.GetShortUserName());
	WebHdfsFileSystem webhdfs = WebHdfsTestUtil.GetWebHdfsFileSystemAs(ugi, config, WebHdfsFileSystem.Scheme);
	Path root = new Path("/");
	// Open up the root so the proxy user can write anywhere (0x1ff == 0777).
	cluster.GetFileSystem().SetPermission(root, new FsPermission((short)0x1ff));
	// Force the filesystem to act as the proxy user without going through login.
	Whitebox.SetInternalState(webhdfs, "ugi", proxyUgi);
	{
		// Home directory must reflect the proxy user, not the original ugi.
		Path responsePath = webhdfs.GetHomeDirectory();
		WebHdfsTestUtil.Log.Info("responsePath=" + responsePath);
		NUnit.Framework.Assert.AreEqual(webhdfs.GetUri() + "/user/" + ProxyUser, responsePath.ToString());
	}
	Path f = new Path("/testWebHdfsDoAs/a.txt");
	{
		// A newly created file must be owned by the proxy user.
		FSDataOutputStream @out = webhdfs.Create(f);
		@out.Write(Sharpen.Runtime.GetBytesForString("Hello, webhdfs user!"));
		@out.Close();
		FileStatus status = webhdfs.GetFileStatus(f);
		WebHdfsTestUtil.Log.Info("status.getOwner()=" + status.GetOwner());
		NUnit.Framework.Assert.AreEqual(ProxyUser, status.GetOwner());
	}
	{
		// Appending must preserve proxy-user ownership.
		FSDataOutputStream @out = webhdfs.Append(f);
		@out.Write(Sharpen.Runtime.GetBytesForString("\nHello again!"));
		@out.Close();
		FileStatus status = webhdfs.GetFileStatus(f);
		WebHdfsTestUtil.Log.Info("status.getOwner()=" + status.GetOwner());
		WebHdfsTestUtil.Log.Info("status.getLen() =" + status.GetLen());
		NUnit.Framework.Assert.AreEqual(ProxyUser, status.GetOwner());
	}
}
/// <summary>
/// On the second step of two-step write, expect connect timeout accessing the
/// redirect location, because the connection backlog is consumed.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestTwoStepWriteConnectTimeout()
{
	StartSingleTemporaryRedirectResponseThread(true);
	OutputStream os = null;
	try
	{
		// Create redirects to a datanode whose backlog is full, so the
		// second-step connect must time out instead of succeeding.
		os = fs.Create(new Path("/file"));
		NUnit.Framework.Assert.Fail("expected timeout");
	}
	catch (SocketTimeoutException e)
	{
		NUnit.Framework.Assert.AreEqual("connect timed out", e.Message);
	}
	finally
	{
		// Always release the stream, even when the expected exception fired.
		IOUtils.Cleanup(Log, os);
	}
}
/// <summary>
/// Test get with offset and length parameters that combine to request a length
/// greater than actual file length.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public virtual void TestOffsetPlusLengthParamsLongerThanFile()
{
	WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
	Path dir = new Path("/test");
	NUnit.Framework.Assert.IsTrue(webhdfs.Mkdirs(dir));
	// Create a file with some content.
	Path testFile = new Path("/test/testOffsetPlusLengthParamsLongerThanFile");
	string content = "testOffsetPlusLengthParamsLongerThanFile";
	FSDataOutputStream testFileOut = webhdfs.Create(testFile);
	try
	{
		testFileOut.Write(Sharpen.Runtime.GetBytesForString(content, "US-ASCII"));
	}
	finally
	{
		IOUtils.CloseStream(testFileOut);
	}
	// Open the file, but request offset starting at 1 and length equal to file
	// length.  Considering the offset, this is longer than the actual content.
	HttpOpParam.OP op = GetOpParam.OP.Open;
	Uri url = webhdfs.ToUrl(op, testFile, new LengthParam(Sharpen.Extensions.ValueOf(content.Length)), new OffsetParam(1L));
	HttpURLConnection conn = null;
	InputStream @is = null;
	try
	{
		conn = (HttpURLConnection)url.OpenConnection();
		conn.SetRequestMethod(op.GetType().ToString());
		conn.SetDoOutput(op.GetDoOutput());
		conn.SetInstanceFollowRedirects(true);
		// Expect OK response and Content-Length header equal to actual length.
		NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScOk, conn.GetResponseCode());
		NUnit.Framework.Assert.AreEqual((content.Length - 1).ToString(), conn.GetHeaderField("Content-Length"));
		// Check content matches.
		byte[] respBody = new byte[content.Length - 1];
		@is = conn.GetInputStream();
		IOUtils.ReadFully(@is, respBody, 0, content.Length - 1);
		NUnit.Framework.Assert.AreEqual(Sharpen.Runtime.Substring(content, 1), Sharpen.Runtime.GetStringForBytes(respBody, "US-ASCII"));
	}
	finally
	{
		IOUtils.CloseStream(@is);
		if (conn != null)
		{
			conn.Disconnect();
		}
	}
}
/// <summary>
/// Verifies that an OPEN request with explicit offset and length parameters
/// returns exactly the requested byte range: Content-Length equals the
/// requested length and the body matches the corresponding slice of the file.
/// </summary>
public virtual void TestWebHdfsOffsetAndLength()
{
	MiniDFSCluster cluster = null;
	Configuration conf = WebHdfsTestUtil.CreateConf();
	int Offset = 42;
	int Length = 512;
	string Path = "/foo";
	byte[] Contents = new byte[1024];
	Random.NextBytes(Contents);
	try
	{
		cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
		WebHdfsFileSystem fs = WebHdfsTestUtil.GetWebHdfsFileSystem(conf, WebHdfsFileSystem.Scheme);
		using (OutputStream os = fs.Create(new Path(Path)))
		{
			os.Write(Contents);
		}
		// Build a raw OPEN URL against the namenode HTTP address with
		// offset/length query parameters, bypassing the filesystem API.
		IPEndPoint addr = cluster.GetNameNode().GetHttpAddress();
		Uri url = new Uri("http", addr.GetHostString(), addr.Port, WebHdfsFileSystem.PathPrefix + Path + "?op=OPEN" + Param.ToSortedString("&", new OffsetParam((long)Offset), new LengthParam((long)Length)));
		HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
		conn.SetInstanceFollowRedirects(true);
		NUnit.Framework.Assert.AreEqual(Length, conn.GetContentLength());
		byte[] subContents = new byte[Length];
		byte[] realContents = new byte[Length];
		System.Array.Copy(Contents, Offset, subContents, 0, Length);
		IOUtils.ReadFully(conn.GetInputStream(), realContents);
		Assert.AssertArrayEquals(subContents, realContents);
	}
	finally
	{
		if (cluster != null)
		{
			cluster.Shutdown();
		}
	}
}
/// <summary>
/// Exercises the lazy delegation-token lifecycle of a (spied) WebHdfsFileSystem:
/// token operations themselves fetch no token; the first regular filesystem
/// operation fetches and installs one; subsequent operations reuse it; an
/// expired self-acquired token is transparently replaced, while a token that
/// was handed in via the UGI is never replaced or cancelled by the filesystem.
/// The spy is reset between phases so each Mockito.Verify count covers exactly
/// one phase.
/// </summary>
/// <exception cref="System.Exception"/>
private void ValidateLazyTokenFetch(Configuration clusterConf)
{
	string testUser = "******";
	UserGroupInformation ugi = UserGroupInformation.CreateUserForTesting(testUser, new string[] { "supergroup" });
	WebHdfsFileSystem fs = ugi.DoAs(new _PrivilegedExceptionAction_304(this, clusterConf));
	// verify token ops don't get a token
	NUnit.Framework.Assert.IsNull(fs.GetRenewToken());
	Org.Apache.Hadoop.Security.Token.Token<object> token = ((Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier>)fs.GetDelegationToken(null));
	fs.RenewDelegationToken(token);
	fs.CancelDelegationToken(token);
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).GetDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).ReplaceExpiredDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).SetDelegationToken(Matchers.Any<Org.Apache.Hadoop.Security.Token.Token>());
	NUnit.Framework.Assert.IsNull(fs.GetRenewToken());
	Org.Mockito.Mockito.Reset(fs);
	// verify first non-token op gets a token
	Path p = new Path("/f");
	fs.Create(p, (short)1).Close();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(1)).GetDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).ReplaceExpiredDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(1)).GetDelegationToken(Matchers.AnyString());
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(1)).SetDelegationToken(Matchers.Any<Org.Apache.Hadoop.Security.Token.Token>());
	token = fs.GetRenewToken();
	NUnit.Framework.Assert.IsNotNull(token);
	NUnit.Framework.Assert.AreEqual(testUser, GetTokenOwner(token));
	NUnit.Framework.Assert.AreEqual(fs.GetTokenKind(), token.GetKind());
	Org.Mockito.Mockito.Reset(fs);
	// verify prior token is reused
	fs.GetFileStatus(p);
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(1)).GetDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).ReplaceExpiredDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).GetDelegationToken(Matchers.AnyString());
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).SetDelegationToken(Matchers.Any<Org.Apache.Hadoop.Security.Token.Token>());
	Org.Apache.Hadoop.Security.Token.Token<object> token2 = fs.GetRenewToken();
	NUnit.Framework.Assert.IsNotNull(token2);
	NUnit.Framework.Assert.AreEqual(fs.GetTokenKind(), token.GetKind());
	NUnit.Framework.Assert.AreSame(token, token2);
	Org.Mockito.Mockito.Reset(fs);
	// verify renew of expired token fails w/o getting a new token
	token = fs.GetRenewToken();
	fs.CancelDelegationToken(token);
	try
	{
		fs.RenewDelegationToken(token);
		NUnit.Framework.Assert.Fail("should have failed");
	}
	catch (SecretManager.InvalidToken)
	{
	}
	catch (Exception ex)
	{
		NUnit.Framework.Assert.Fail("wrong exception:" + ex);
	}
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).GetDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).ReplaceExpiredDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).GetDelegationToken(Matchers.AnyString());
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).SetDelegationToken(Matchers.Any<Org.Apache.Hadoop.Security.Token.Token>());
	token2 = fs.GetRenewToken();
	NUnit.Framework.Assert.IsNotNull(token2);
	NUnit.Framework.Assert.AreEqual(fs.GetTokenKind(), token.GetKind());
	NUnit.Framework.Assert.AreSame(token, token2);
	Org.Mockito.Mockito.Reset(fs);
	// verify cancel of expired token fails w/o getting a new token
	try
	{
		fs.CancelDelegationToken(token);
		NUnit.Framework.Assert.Fail("should have failed");
	}
	catch (SecretManager.InvalidToken)
	{
	}
	catch (Exception ex)
	{
		NUnit.Framework.Assert.Fail("wrong exception:" + ex);
	}
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).GetDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).ReplaceExpiredDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).GetDelegationToken(Matchers.AnyString());
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).SetDelegationToken(Matchers.Any<Org.Apache.Hadoop.Security.Token.Token>());
	token2 = fs.GetRenewToken();
	NUnit.Framework.Assert.IsNotNull(token2);
	NUnit.Framework.Assert.AreEqual(fs.GetTokenKind(), token.GetKind());
	NUnit.Framework.Assert.AreSame(token, token2);
	Org.Mockito.Mockito.Reset(fs);
	// verify an expired token is replaced with a new token
	fs.Open(p).Close();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(2)).GetDelegationToken();
	// first bad, then good
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(1)).ReplaceExpiredDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(1)).GetDelegationToken(null);
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(1)).SetDelegationToken(Matchers.Any<Org.Apache.Hadoop.Security.Token.Token>());
	token2 = fs.GetRenewToken();
	NUnit.Framework.Assert.IsNotNull(token2);
	NUnit.Framework.Assert.AreEqual(fs.GetTokenKind(), token.GetKind());
	NUnit.Framework.Assert.AreNotSame(token, token2);
	NUnit.Framework.Assert.AreEqual(testUser, GetTokenOwner(token2));
	Org.Mockito.Mockito.Reset(fs);
	// verify with open because it's a little different in how it
	// opens connections
	fs.CancelDelegationToken(fs.GetRenewToken());
	InputStream @is = fs.Open(p);
	@is.Read();
	@is.Close();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(2)).GetDelegationToken();
	// first bad, then good
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(1)).ReplaceExpiredDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(1)).GetDelegationToken(null);
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(1)).SetDelegationToken(Matchers.Any<Org.Apache.Hadoop.Security.Token.Token>());
	token2 = fs.GetRenewToken();
	NUnit.Framework.Assert.IsNotNull(token2);
	NUnit.Framework.Assert.AreEqual(fs.GetTokenKind(), token.GetKind());
	NUnit.Framework.Assert.AreNotSame(token, token2);
	NUnit.Framework.Assert.AreEqual(testUser, GetTokenOwner(token2));
	Org.Mockito.Mockito.Reset(fs);
	// verify fs close cancels the token
	fs.Close();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).GetDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).ReplaceExpiredDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).GetDelegationToken(Matchers.AnyString());
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).SetDelegationToken(Matchers.Any<Org.Apache.Hadoop.Security.Token.Token>());
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(1)).CancelDelegationToken(Matchers.Eq(token2));
	// add a token to ugi for a new fs, verify it uses that token
	token = ((Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier>)fs.GetDelegationToken(null));
	ugi.AddToken(token);
	fs = ugi.DoAs(new _PrivilegedExceptionAction_426(this, clusterConf));
	NUnit.Framework.Assert.IsNull(fs.GetRenewToken());
	fs.GetFileStatus(new Path("/"));
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(1)).GetDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).ReplaceExpiredDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).GetDelegationToken(Matchers.AnyString());
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(1)).SetDelegationToken(Matchers.Eq(token));
	token2 = fs.GetRenewToken();
	NUnit.Framework.Assert.IsNotNull(token2);
	NUnit.Framework.Assert.AreEqual(fs.GetTokenKind(), token.GetKind());
	NUnit.Framework.Assert.AreSame(token, token2);
	Org.Mockito.Mockito.Reset(fs);
	// verify it reuses the prior ugi token
	fs.GetFileStatus(new Path("/"));
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(1)).GetDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).ReplaceExpiredDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).GetDelegationToken(Matchers.AnyString());
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).SetDelegationToken(Matchers.Any<Org.Apache.Hadoop.Security.Token.Token>());
	token2 = fs.GetRenewToken();
	NUnit.Framework.Assert.IsNotNull(token2);
	NUnit.Framework.Assert.AreEqual(fs.GetTokenKind(), token.GetKind());
	NUnit.Framework.Assert.AreSame(token, token2);
	Org.Mockito.Mockito.Reset(fs);
	// verify an expired ugi token is NOT replaced with a new token
	fs.CancelDelegationToken(token);
	for (int i = 0; i < 2; i++)
	{
		try
		{
			fs.GetFileStatus(new Path("/"));
			NUnit.Framework.Assert.Fail("didn't fail");
		}
		catch (SecretManager.InvalidToken)
		{
		}
		catch (Exception ex)
		{
			NUnit.Framework.Assert.Fail("wrong exception:" + ex);
		}
		Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(1)).GetDelegationToken();
		Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Times(1)).ReplaceExpiredDelegationToken();
		Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).GetDelegationToken(Matchers.AnyString());
		Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).SetDelegationToken(Matchers.Any<Org.Apache.Hadoop.Security.Token.Token>());
		token2 = fs.GetRenewToken();
		NUnit.Framework.Assert.IsNotNull(token2);
		NUnit.Framework.Assert.AreEqual(fs.GetTokenKind(), token.GetKind());
		NUnit.Framework.Assert.AreSame(token, token2);
		Org.Mockito.Mockito.Reset(fs);
	}
	// verify fs close does NOT cancel the ugi token
	fs.Close();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).GetDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).ReplaceExpiredDelegationToken();
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).GetDelegationToken(Matchers.AnyString());
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).SetDelegationToken(Matchers.Any<Org.Apache.Hadoop.Security.Token.Token>());
	Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).CancelDelegationToken(Matchers.Any<Org.Apache.Hadoop.Security.Token.Token>());
}
/// <summary>
/// Exercises a series of raw WebHDFS HTTP requests and asserts the expected
/// HTTP response codes: OK for GETHOMEDIRECTORY, FORBIDDEN for an unauthorized
/// doAs, BAD_REQUEST for malformed parameter sets, NOT_FOUND for a missing
/// file, plus redirect handling for CREATE (including paths with spaces).
/// </summary>
/// <exception cref="System.IO.IOException"/>
public virtual void TestResponseCode()
{
	WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
	Path root = new Path("/");
	Path dir = new Path("/test/testUrl");
	NUnit.Framework.Assert.IsTrue(webhdfs.Mkdirs(dir));
	Path file = new Path("/test/file");
	FSDataOutputStream @out = webhdfs.Create(file);
	@out.Write(1);
	@out.Close();
	{
		//test GETHOMEDIRECTORY
		Uri url = webhdfs.ToUrl(GetOpParam.OP.Gethomedirectory, root);
		HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
		IDictionary<object, object> m = WebHdfsTestUtil.ConnectAndGetJson(conn, HttpServletResponse.ScOk);
		NUnit.Framework.Assert.AreEqual(WebHdfsFileSystem.GetHomeDirectoryString(ugi), m[typeof(Path).Name]);
		conn.Disconnect();
	}
	{
		//test GETHOMEDIRECTORY with unauthorized doAs
		Uri url = webhdfs.ToUrl(GetOpParam.OP.Gethomedirectory, root, new DoAsParam(ugi.GetShortUserName() + "proxy"));
		HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
		conn.Connect();
		NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScForbidden, conn.GetResponseCode());
		conn.Disconnect();
	}
	{
		//test set owner with empty parameters
		Uri url = webhdfs.ToUrl(PutOpParam.OP.Setowner, dir);
		HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
		conn.Connect();
		NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScBadRequest, conn.GetResponseCode());
		conn.Disconnect();
	}
	{
		//test set replication on a directory
		HttpOpParam.OP op = PutOpParam.OP.Setreplication;
		Uri url = webhdfs.ToUrl(op, dir);
		HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
		conn.SetRequestMethod(op.GetType().ToString());
		conn.Connect();
		NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScOk, conn.GetResponseCode());
		// Replication on a directory is a no-op, so the API reports false.
		NUnit.Framework.Assert.IsFalse(webhdfs.SetReplication(dir, (short)1));
		conn.Disconnect();
	}
	{
		//test get file status for a non-exist file.
		Path p = new Path(dir, "non-exist");
		Uri url = webhdfs.ToUrl(GetOpParam.OP.Getfilestatus, p);
		HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
		conn.Connect();
		NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScNotFound, conn.GetResponseCode());
		conn.Disconnect();
	}
	{
		//test set permission with empty parameters
		HttpOpParam.OP op = PutOpParam.OP.Setpermission;
		Uri url = webhdfs.ToUrl(op, dir);
		HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
		conn.SetRequestMethod(op.GetType().ToString());
		conn.Connect();
		NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScOk, conn.GetResponseCode());
		NUnit.Framework.Assert.AreEqual(0, conn.GetContentLength());
		NUnit.Framework.Assert.AreEqual(MediaType.ApplicationOctetStream, conn.GetContentType());
		// With no permission parameter, the default (0x1ed == 0755) is applied.
		NUnit.Framework.Assert.AreEqual((short)0x1ed, webhdfs.GetFileStatus(dir).GetPermission().ToShort());
		conn.Disconnect();
	}
	{
		//test append.
		AppendTestUtil.TestAppend(fs, new Path(dir, "append"));
	}
	{
		//test NamenodeAddressParam not set.
		HttpOpParam.OP op = PutOpParam.OP.Create;
		Uri url = webhdfs.ToUrl(op, dir);
		HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
		conn.SetRequestMethod(op.GetType().ToString());
		conn.SetDoOutput(false);
		conn.SetInstanceFollowRedirects(false);
		conn.Connect();
		string redirect = conn.GetHeaderField("Location");
		conn.Disconnect();
		//remove NamenodeAddressParam
		WebHdfsFileSystem.Log.Info("redirect = " + redirect);
		int i = redirect.IndexOf(NamenodeAddressParam.Name);
		int j = redirect.IndexOf("&", i);
		string modified = Sharpen.Runtime.Substring(redirect, 0, i - 1) + Sharpen.Runtime.Substring(redirect, j);
		WebHdfsFileSystem.Log.Info("modified = " + modified);
		//connect to datanode
		conn = (HttpURLConnection) new Uri(modified).OpenConnection();
		conn.SetRequestMethod(op.GetType().ToString());
		conn.SetDoOutput(op.GetDoOutput());
		conn.Connect();
		NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScBadRequest, conn.GetResponseCode());
	}
	{
		//test jsonParse with non-json type.
		HttpOpParam.OP op = GetOpParam.OP.Open;
		Uri url = webhdfs.ToUrl(op, file);
		HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
		conn.SetRequestMethod(op.GetType().ToString());
		conn.Connect();
		try
		{
			WebHdfsFileSystem.JsonParse(conn, false);
			Fail();
		}
		catch (IOException ioe)
		{
			WebHdfsFileSystem.Log.Info("GOOD", ioe);
		}
		conn.Disconnect();
	}
	{
		//test create with path containing spaces
		HttpOpParam.OP op = PutOpParam.OP.Create;
		Path path = new Path("/test/path with spaces");
		Uri url = webhdfs.ToUrl(op, path);
		HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
		conn.SetRequestMethod(op.GetType().ToString());
		conn.SetDoOutput(false);
		conn.SetInstanceFollowRedirects(false);
		string redirect;
		try
		{
			conn.Connect();
			NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScTemporaryRedirect, conn.GetResponseCode());
			redirect = conn.GetHeaderField("Location");
		}
		finally
		{
			conn.Disconnect();
		}
		// Follow the redirect manually and expect the datanode to create the file.
		conn = (HttpURLConnection) new Uri(redirect).OpenConnection();
		conn.SetRequestMethod(op.GetType().ToString());
		conn.SetDoOutput(op.GetDoOutput());
		try
		{
			conn.Connect();
			NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScCreated, conn.GetResponseCode());
		}
		finally
		{
			conn.Disconnect();
		}
	}
}