Example #1
        /// <summary>Test seek: read from the given offset to the end of the file and verify the contents.</summary>
        /// <exception cref="System.IO.IOException"/>
        internal static void VerifySeek(FileSystem fs, Path p, long offset, long length,
                                        byte[] buf, byte[] expected)
        {
            long remaining = length - offset;
            long @checked  = 0;

            Log.Info("XXX SEEK: offset=" + offset + ", remaining=" + remaining);
            TestWebHDFS.Ticker t = new TestWebHDFS.Ticker("SEEK", "offset=%d, remaining=%d",
                                                          offset, remaining);
            FSDataInputStream @in = fs.Open(p, 64 << 10); // open with a 64 KB read buffer

            @in.Seek(offset);
            try
            {
                while (remaining > 0) // read from the offset to the end of the file
                {
                    t.Tick(@checked, "offset=%d, remaining=%d", offset, remaining);
                    int n = (int)Math.Min(remaining, buf.Length); // at most one buffer per read
                    @in.ReadFully(buf, 0, n);
                    CheckData(offset, remaining, n, buf, expected);
                    offset    += n;
                    remaining -= n;
                    @checked  += n;
                }
            }
            finally
            {
                @in.Close(); // release the stream even if a read or check fails
            }
            t.End(@checked);
        }
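
For context, here is a minimal sketch of a caller, assuming the surrounding test class exposes the static Random field and CheckData helper used above; the method name, path, and pattern size are hypothetical and not part of the original suite.

        // Hypothetical caller: write a doubled 1 MB pattern, then verify a seek
        // into the second copy. Assumes "fs" is backed by a running cluster, as
        // in Example #2.
        internal static void SeekSmokeTest(FileSystem fs)
        {
            byte[] data = new byte[1 << 20];
            Random.NextBytes(data);
            byte[] expected = new byte[2 * data.Length];
            System.Array.Copy(data, 0, expected, 0, data.Length);
            System.Array.Copy(data, 0, expected, data.Length, data.Length);

            Path p = new Path("/test/seekFile");
            FSDataOutputStream @out = fs.Create(p);
            try
            {
                @out.Write(expected, 0, expected.Length);
            }
            finally
            {
                @out.Close();
            }

            byte[] buf = new byte[data.Length];
            // Start reading halfway through, i.e. at the second copy of the pattern.
            VerifySeek(fs, p, expected.Length / 2, expected.Length, buf, expected);
        }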
Example #2
        // Typically invoked with a 200 MB file length (see the usage sketch after this method).
        /// <summary>Test read and write large files.</summary>
        /// <exception cref="System.Exception"/>
        internal static void LargeFileTest(long fileLength)
        {
            Configuration  conf    = WebHdfsTestUtil.CreateConf();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();

            try
            {
                cluster.WaitActive();
                FileSystem fs = WebHdfsTestUtil.GetWebHdfsFileSystem(conf, WebHdfsFileSystem.Scheme);
                Path dir = new Path("/test/largeFile");
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dir));
                byte[] data = new byte[1 << 20]; // 1 MB of random data
                Random.NextBytes(data);
                // Expected content is the 1 MB pattern repeated, so any file offset
                // maps into the pattern via offset % data.Length.
                byte[] expected = new byte[2 * data.Length];
                System.Array.Copy(data, 0, expected, 0, data.Length);
                System.Array.Copy(data, 0, expected, data.Length, data.Length);
                Path p = new Path(dir, "file");
                TestWebHDFS.Ticker t = new TestWebHDFS.Ticker("WRITE", "fileLength=" + fileLength);
                FSDataOutputStream @out = fs.Create(p);
                try
                {
                    long remaining = fileLength;
                    while (remaining > 0)
                    {
                        t.Tick(fileLength - remaining, "remaining=%d", remaining);
                        int n = (int)Math.Min(remaining, data.Length);
                        @out.Write(data, 0, n);
                        remaining -= n;
                    }
                }
                finally
                {
                    @out.Close();
                }
                t.End(fileLength);
                NUnit.Framework.Assert.AreEqual(fileLength, fs.GetFileStatus(p).GetLen());
                long   smallOffset = Random.Next(1 << 20) + (1 << 20); // between 1 MB and 2 MB
                long   largeOffset = fileLength - smallOffset;         // the same distance before EOF
                byte[] buf         = new byte[data.Length];
                VerifySeek(fs, p, largeOffset, fileLength, buf, expected);
                VerifySeek(fs, p, smallOffset, fileLength, buf, expected);
                VerifyPread(fs, p, largeOffset, fileLength, buf, expected);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
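
In the upstream Hadoop suite this helper is driven with a 200 MB file. A minimal NUnit wrapper along those lines might look like the following; the attribute form and method name are assumptions about the surrounding test class.

        // Hypothetical NUnit entry point for the helper above.
        [NUnit.Framework.Test]
        public virtual void TestLargeFile()
        {
            // 200 MB, matching the file-length note above LargeFileTest.
            LargeFileTest(200L << 20);
        }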