Code Example #1
File: ChecksumFs.cs Project: orf53975/hadoop.net
            /// <exception cref="System.IO.IOException"/>
            /// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
            public ChecksumFSInputChecker(ChecksumFs fs, Path file, int bufferSize)
                : base(file, fs.GetFileStatus(file).GetReplication())
            {
                this.datas = fs.GetRawFs().Open(file, bufferSize);
                this.fs    = fs;
                Path sumFile = fs.GetChecksumFile(file);

                try
                {
                    int sumBufferSize = fs.GetSumBufferSize(fs.GetBytesPerSum(), bufferSize);
                    sums = fs.GetRawFs().Open(sumFile, sumBufferSize);
                    byte[] version = new byte[ChecksumVersion.Length];
                    sums.ReadFully(version);
                    if (!Arrays.Equals(version, ChecksumVersion))
                    {
                        throw new IOException("Not a checksum file: " + sumFile);
                    }
                    this.bytesPerSum = sums.ReadInt();
                    Set(fs.verifyChecksum, DataChecksum.NewCrc32(), bytesPerSum, 4);
                }
                catch (FileNotFoundException)
                {
                    // quietly ignore
                    Set(fs.verifyChecksum, null, 1, 0);
                }
                catch (IOException e)
                {
                    // loudly ignore
                    Log.Warn("Problem opening checksum file: " + file + ".  Ignoring exception: ", e);
                    Set(fs.verifyChecksum, null, 1, 0);
                }
            }
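
For reference, the constructor above implies a small on-disk layout for the checksum file: a version magic (ChecksumVersion), then a 4-byte chunk size (bytesPerSum), then one 4-byte CRC32 per bytesPerSum-sized chunk of data. A minimal, hedged sketch of reading just that header, assuming a sumFile path and raw file system handle obtained as in the constructor:

    // Hedged sketch: parse the checksum-file header described above.
    FSDataInputStream sums = fs.GetRawFs().Open(sumFile);
    byte[] magic = new byte[ChecksumVersion.Length];
    sums.ReadFully(magic);            // must equal ChecksumVersion, else this is not a checksum file
    int bytesPerSum = sums.ReadInt(); // number of data bytes covered by each 4-byte CRC32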
Code Example #2
        public virtual void TestOpenManyFilesViaTcp()
        {
            int           NumOpens = 500;
            Configuration conf     = new Configuration();

            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, false);
            MiniDFSCluster cluster = null;

            FSDataInputStream[] streams = new FSDataInputStream[NumOpens];
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).Build();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                Path TestPath             = new Path("/testFile");
                DFSTestUtil.CreateFile(dfs, TestPath, 131072, (short)1, 1);
                for (int i = 0; i < NumOpens; i++)
                {
                    streams[i] = dfs.Open(TestPath);
                    Log.Info("opening file " + i + "...");
                    NUnit.Framework.Assert.IsTrue(-1 != streams[i].Read());
                    streams[i].Unbuffer();
                }
            }
            finally
            {
                foreach (FSDataInputStream stream in streams)
                {
                    IOUtils.Cleanup(null, stream);
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Code Example #3
File: AvroFSInput.cs Project: orf53975/hadoop.net
        /// <summary>
        /// Construct given a <see cref="FileContext"/> and a <see cref="Path"/>.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        public AvroFSInput(FileContext fc, Path p)
        {
            FileStatus status = fc.GetFileStatus(p);

            this.len    = status.GetLen();
            this.stream = fc.Open(p);
        }
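
A hedged usage sketch for this constructor; the file path is hypothetical, and FileContext.GetFileContext() is assumed to follow the standard Hadoop API in this conversion:

    // Hypothetical usage: wrap an Avro file for seekable reads.
    FileContext fc = FileContext.GetFileContext();
    AvroFSInput input = new AvroFSInput(fc, new Path("/data/records.avro"));
    // The constructor resolves the length via GetFileStatus and opens the stream.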
Code Example #4
        public virtual void TestStatistics()
        {
            URI fsUri = GetFsUri();

            FileSystem.Statistics stats = FileContext.GetStatistics(fsUri);
            Assert.Equal(0, stats.GetBytesRead());
            Path filePath = fileContextTestHelper.GetTestRootPath(fc, "file1");

            FileContextTestHelper.CreateFile(fc, filePath, numBlocks, blockSize);
            Assert.Equal(0, stats.GetBytesRead());
            VerifyWrittenBytes(stats);
            FSDataInputStream fstr = fc.Open(filePath);

            byte[] buf       = new byte[blockSize];
            int    bytesRead = fstr.Read(buf, 0, blockSize);

            fstr.Read(0, buf, 0, blockSize);
            Assert.Equal(blockSize, bytesRead);
            VerifyReadBytes(stats);
            VerifyWrittenBytes(stats);
            VerifyReadBytes(FileContext.GetStatistics(GetFsUri()));
            IDictionary <URI, FileSystem.Statistics> statsMap = FileContext.GetAllStatistics();
            URI exactUri = GetSchemeAuthorityUri();

            VerifyWrittenBytes(statsMap[exactUri]);
            fc.Delete(filePath, true);
        }
Code Example #5
        public virtual void TestBufferedFSInputStream()
        {
            Configuration conf = new Configuration();

            conf.SetClass("fs.file.impl", typeof(RawLocalFileSystem), typeof(FileSystem));
            conf.SetInt(CommonConfigurationKeysPublic.IoFileBufferSizeKey, 4096);
            FileSystem fs = FileSystem.NewInstance(conf);

            byte[] buf = new byte[10 * 1024];
            new Random().NextBytes(buf);
            // Write random bytes to file
            FSDataOutputStream stream = fs.Create(TestPath);

            try
            {
                stream.Write(buf);
            }
            finally
            {
                stream.Close();
            }
            Random            r   = new Random();
            FSDataInputStream stm = fs.Open(TestPath);

            // Record the sequence of seeks and reads which trigger a failure.
            int[] seeks = new int[10];
            int[] reads = new int[10];
            try
            {
                for (int i = 0; i < 1000; i++)
                {
                    int seekOff = r.Next(buf.Length);
                    int toRead  = r.Next(Math.Min(buf.Length - seekOff, 32000));
                    seeks[i % seeks.Length] = seekOff;
                    reads[i % reads.Length] = toRead;
                    VerifyRead(stm, buf, seekOff, toRead);
                }
            }
            catch (Exception afe)
            {
                StringBuilder sb = new StringBuilder();
                sb.Append("Sequence of actions:\n");
                for (int j = 0; j < seeks.Length; j++)
                {
                    sb.Append("seek @ ").Append(seeks[j]).Append("  ").Append("read ").Append(reads[j
                                                                                              ]).Append("\n");
                }
                System.Console.Error.WriteLine(sb.ToString());
                throw;
            }
            finally
            {
                stm.Close();
            }
        }
Code Example #6
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestInputStreamClosedTwice()
        {
            // HADOOP-4760: according to Closeable#close(), closing already-closed
            // streams should have no effect.
            Org.Apache.Hadoop.FS.Path src = Path("/test/hadoop/file");
            CreateFile(src);
            FSDataInputStream @in = fs.Open(src);

            @in.Close();
            @in.Close();
        }
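
Because Close() must be idempotent, an explicit Close() can safely coexist with a using block. A minimal sketch, assuming FSDataInputStream is disposable in this conversion:

    using (FSDataInputStream input = fs.Open(src))
    {
        input.Read();
        input.Close(); // the implicit Dispose at the end of the block is then a no-op
    }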
Code Example #7
        public virtual void TestFallbackRead()
        {
            HdfsConfiguration conf     = InitZeroCopyTest();
            MiniDFSCluster    cluster  = null;
            Path TestPath              = new Path("/a");
            int  TestFileLength        = 16385;
            int  RandomSeed            = 23453;
            FSDataInputStream     fsIn = null;
            DistributedFileSystem fs   = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, RandomSeed);
                try
                {
                    DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
                }
                catch (Exception e)
                {
                    NUnit.Framework.Assert.Fail("unexpected InterruptedException during waitReplication: " + e);
                }
                catch (TimeoutException e)
                {
                    NUnit.Framework.Assert.Fail("unexpected TimeoutException during waitReplication: " + e);
                }
                fsIn = fs.Open(TestPath);
                byte[] original = new byte[TestFileLength];
                IOUtils.ReadFully(fsIn, original, 0, TestFileLength);
                fsIn.Close();
                fsIn = fs.Open(TestPath);
                TestFallbackImpl(fsIn, original);
            }
            finally
            {
                if (fsIn != null)
                {
                    fsIn.Close();
                }
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Code Example #8
        /// <exception cref="System.IO.IOException"/>
        private string ReadFile(string @out)
        {
            Path              path = new Path(@out);
            FileStatus        stat = lfs.GetFileStatus(path);
            FSDataInputStream @in  = lfs.Open(path);

            byte[] buffer = new byte[(int)stat.GetLen()];
            @in.ReadFully(buffer);
            @in.Close();
            lfs.Delete(path, false);
            return(Runtime.GetStringForBytes(buffer));
        }
Code Example #9
 /// <exception cref="System.IO.IOException"/>
 private void VerifyRead(FSDataInputStream stm, byte[] fileContents, int seekOff,
                         int toRead)
 {
     byte[] @out = new byte[toRead];
     stm.Seek(seekOff);
     stm.ReadFully(@out);
     byte[] expected = Arrays.CopyOfRange(fileContents, seekOff, seekOff + toRead);
     if (!Arrays.Equals(@out, expected))
     {
         string s = "\nExpected: " + StringUtils.ByteToHexString(expected) + "\ngot:      "
                    + StringUtils.ByteToHexString(@out) + "\noff=" + seekOff + " len=" + toRead;
         NUnit.Framework.Assert.Fail(s);
     }
 }
Code Example #10
        public virtual void TestUnbufferClosesSockets()
        {
            Configuration conf = new Configuration();

            // Set a new ClientContext.  This way, we will have our own PeerCache,
            // rather than sharing one with other unit tests.
            conf.Set(DFSConfigKeys.DfsClientContext, "testUnbufferClosesSocketsContext");
            // Disable short-circuit reads.  With short-circuit, we wouldn't hold open a
            // TCP socket.
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, false);
            // Set a really long socket timeout to avoid test timing issues.
            conf.SetLong(DFSConfigKeys.DfsClientSocketTimeoutKey, 100000000L);
            conf.SetLong(DFSConfigKeys.DfsClientSocketCacheExpiryMsecKey, 100000000L);
            MiniDFSCluster    cluster = null;
            FSDataInputStream stream  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).Build();
                DistributedFileSystem dfs = (DistributedFileSystem)FileSystem.NewInstance(conf);
                Path TestPath             = new Path("/test1");
                DFSTestUtil.CreateFile(dfs, TestPath, 128, (short)1, 1);
                stream = dfs.Open(TestPath);
                // Read a byte.  This will trigger the creation of a block reader.
                stream.Seek(2);
                int b = stream.Read();
                NUnit.Framework.Assert.IsTrue(-1 != b);
                // The Peer cache should start off empty.
                PeerCache cache = dfs.GetClient().GetClientContext().GetPeerCache();
                NUnit.Framework.Assert.AreEqual(0, cache.Size());
                // Unbuffer should clear the block reader and return the socket to the
                // cache.
                stream.Unbuffer();
                stream.Seek(2);
                NUnit.Framework.Assert.AreEqual(1, cache.Size());
                int b2 = stream.Read();
                NUnit.Framework.Assert.AreEqual(b, b2);
            }
            finally
            {
                if (stream != null)
                {
                    IOUtils.Cleanup(null, stream);
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
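
The pattern this test exercises generalizes to applications that hold many handles open but read them only occasionally: call Unbuffer() after each burst of reads so an idle handle does not pin a socket or block reader. A hedged sketch with a hypothetical path:

    FSDataInputStream handle = dfs.Open(new Path("/logs/app.log"));
    int b = handle.Read(); // allocates a block reader and takes a socket
    handle.Unbuffer();     // drops the block reader and returns the socket to the PeerCache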
Code Example #11
            /// <exception cref="System.IO.IOException"/>
            public virtual void Map(Text key, LongWritable value,
                                    OutputCollector<K, LongWritable> collector, Reporter reporter)
            {
                string name = key.ToString();
                long   size = value.Get();
                long   seed = long.Parse(name);

                if (size == 0)
                {
                    return;
                }
                reporter.SetStatus("opening " + name);
                FSDataInputStream @in = fs.Open(new Path(DataDir, name));

                try
                {
                    for (int i = 0; i < SeeksPerFile; i++)
                    {
                        // generate a random position
                        long position = Math.Abs(random.NextLong()) % size;
                        // seek file to that position
                        reporter.SetStatus("seeking " + name);
                        @in.Seek(position);
                        byte b = @in.ReadByte();
                        // check that byte matches
                        byte checkByte = 0;
                        // advance random state to that position
                        random.SetSeed(seed);
                        for (int p = 0; p <= position; p += check.Length)
                        {
                            reporter.SetStatus("generating data for " + name);
                            if (fastCheck)
                            {
                                checkByte = unchecked ((byte)random.Next(byte.MaxValue));
                            }
                            else
                            {
                                random.NextBytes(check);
                                checkByte = check[(int)(position % check.Length)];
                            }
                        }
                        NUnit.Framework.Assert.AreEqual(b, checkByte);
                    }
                }
                finally
                {
                    @in.Close();
                }
            }
Code Example #12
        public virtual void TestStreamType()
        {
            Path testPath = new Path(TestRootDir, "testStreamType");

            localFs.Create(testPath).Close();
            FSDataInputStream @in = null;

            localFs.SetVerifyChecksum(true);
            @in = localFs.Open(testPath);
            Assert.True("stream is input checker", @in.GetWrappedStream() is
                        FSInputChecker);
            localFs.SetVerifyChecksum(false);
            @in = localFs.Open(testPath);
            NUnit.Framework.Assert.IsFalse("stream is not input checker", @in.GetWrappedStream
                                               () is FSInputChecker);
        }
Code Example #13
            /// <exception cref="System.IO.IOException"/>
            public override IDisposable GetIOStream(string name)
            {
                // IOMapperBase
                Path filePath = new Path(GetDataDir(GetConf()), name);

                this.fileSize = fs.GetFileStatus(filePath).GetLen();
                InputStream @in = fs.Open(filePath);

                if (compressionCodec != null)
                {
                    @in = new FSDataInputStream(compressionCodec.CreateInputStream(@in));
                }
                Log.Info("in = " + @in.GetType().FullName);
                Log.Info("skipSize = " + skipSize);
                return(@in);
            }
Code Example #14
        /// <exception cref="System.IO.IOException"/>
        internal static string ReadFile(FileSystem fs, Path name, int buflen)
        {
            byte[]            b      = new byte[buflen];
            int               offset = 0;
            int               n;
            FSDataInputStream @in    = fs.Open(name);

            // Fill the buffer until it is full or the stream reaches EOF.
            for (int remaining; (remaining = b.Length - offset) > 0 &&
                 (n = @in.Read(b, offset, remaining)) != -1; offset += n)
            {
            }
            Assert.Equal(offset, Math.Min(b.Length, @in.GetPos()));
            @in.Close();
            string s = Runtime.GetStringForBytes(b, 0, offset);

            return(s);
        }
Code Example #15
        /// <exception cref="System.IO.IOException"/>
        private void VerifyFile(FileSystem fs, Path file, int bytesToVerify,
                                byte[] expectedBytes)
        {
            FSDataInputStream @in = fs.Open(file);

            try
            {
                byte[] readBuf = new byte[bytesToVerify];
                @in.ReadFully(readBuf, 0, bytesToVerify);
                for (int i = 0; i < bytesToVerify; i++)
                {
                    Assert.Equal(expectedBytes[i], readBuf[i]);
                }
            }
            finally
            {
                @in.Close();
            }
        }
Code Example #16
            /// <exception cref="System.IO.IOException"/>
            internal override object DoIO(Reporter reporter, string name, long offset)
            {
                // open file
                FSDataInputStream @in = null;
                Path p = new Path(name);

                try
                {
                    @in = fs.Open(p);
                }
                catch (IOException)
                {
                    return(name + "@(missing)");
                }
                @in.Seek(offset);
                long actualSize = 0;

                try
                {
                    long blockSize = fs.GetDefaultBlockSize(p);
                    reporter.SetStatus("reading " + name + "@" + offset + "/" + blockSize);
                    for (int curSize = bufferSize; curSize == bufferSize && actualSize < blockSize;
                         actualSize += curSize)
                    {
                        curSize = @in.Read(buffer, 0, bufferSize);
                    }
                }
                catch (IOException)
                {
                    Log.Info("Corrupted block detected in \"" + name + "\" at " + offset);
                    return(name + "@" + offset);
                }
                finally
                {
                    @in.Close();
                }
                return(actualSize);
            }
Code Example #17
        public virtual void TestTruncatedChecksum()
        {
            Path testPath           = new Path(TestRootDir, "testtruncatedcrc");
            FSDataOutputStream fout = localFs.Create(testPath);

            fout.Write(Runtime.GetBytesForString("testing truncation"));
            fout.Close();
            // Read in the checksum
            Path              checksumFile   = localFs.GetChecksumFile(testPath);
            FileSystem        rawFs          = localFs.GetRawFileSystem();
            FSDataInputStream checksumStream = rawFs.Open(checksumFile);

            byte[] buf  = new byte[8192];
            int    read = checksumStream.Read(buf, 0, buf.Length);

            checksumStream.Close();
            // Now rewrite the checksum file with the last byte missing
            FSDataOutputStream replaceStream = rawFs.Create(checksumFile);

            replaceStream.Write(buf, 0, read - 1);
            replaceStream.Close();
            // Now reading the file should fail with a ChecksumException
            try
            {
                FileSystemTestHelper.ReadFile(localFs, testPath, 1024);
                NUnit.Framework.Assert.Fail("Did not throw a ChecksumException when reading truncated "
                                            + "crc file");
            }
            catch (ChecksumException)
            {
            }
            // telling it not to verify checksums, should avoid issue.
            localFs.SetVerifyChecksum(false);
            string str = FileSystemTestHelper.ReadFile(localFs, testPath, 1024);

            Assert.True("read", "testing truncation".Equals(str));
        }
Code Example #18
        /// <summary>Write a file and read it in, validating the result.</summary>
        /// <remarks>
        /// Write a file and read it in, validating the result. Optional flags control
        /// whether file overwrite operations should be enabled, and whether the
        /// file should be deleted afterwards.
        /// If there is a mismatch between what was written and what was expected,
        /// a small range of bytes either side of the first error is logged to aid
        /// diagnosing what problem occurred - whether it was a previous file
        /// or a corruption of the current file. This assumes that two
        /// sequential runs to the same path use datasets with different character
        /// moduli.
        /// </remarks>
        /// <param name="path">path to write to</param>
        /// <param name="len">length of data</param>
        /// <param name="overwrite">should the create option allow overwrites?</param>
        /// <param name="delete">
        /// should the file be deleted afterwards? - with a verification
        /// that it worked. Deletion is not attempted if an assertion has failed
        /// earlier - it is not in a <code>finally{}</code> block.
        /// </param>
        /// <exception cref="System.IO.IOException">IO problems</exception>
        protected internal virtual void WriteAndRead(Org.Apache.Hadoop.FS.Path path,
                                                     byte[] src, int len, bool overwrite, bool delete)
        {
            Assert.True("Not enough data in source array to write " + len +
                        " bytes", src.Length >= len);
            fs.Mkdirs(path.GetParent());
            FSDataOutputStream @out = fs.Create(path, overwrite,
                                                fs.GetConf().GetInt("io.file.buffer.size", 4096),
                                                (short)1, GetBlockSize());

            @out.Write(src, 0, len);
            @out.Close();
            Assert.True("Exists", fs.Exists(path));
            Assert.Equal("Length", len, fs.GetFileStatus(path).GetLen());
            FSDataInputStream @in = fs.Open(path);

            byte[] buf = new byte[len];
            @in.ReadFully(0, buf);
            @in.Close();
            Assert.Equal(len, buf.Length);
            int errors           = 0;
            int first_error_byte = -1;

            for (int i = 0; i < len; i++)
            {
                if (src[i] != buf[i])
                {
                    if (errors == 0)
                    {
                        first_error_byte = i;
                    }
                    errors++;
                }
            }
            if (errors > 0)
            {
                string message = string.Format(" {0} errors in file of length {1}", errors, len);
                Log.Warn(message);
                // the range either side of the first error to print
                // this is a purely arbitrary number, to aid user debugging
                int overlap = 10;
                for (int i_1 = Math.Max(0, first_error_byte - overlap);
                     i_1 < Math.Min(first_error_byte + overlap, len); i_1++)
                {
                    byte   actual   = buf[i_1];
                    byte   expected = src[i_1];
                    string letter   = ToChar(actual);
                    string line     = string.Format("[{0:D4}] {1:x2} {2}\n", i_1, actual, letter);
                    if (expected != actual)
                    {
                        line = string.Format("[{0:D4}] {1:x2} {2} -expected {3:x2} {4}\n",
                                             i_1, actual, letter, expected, ToChar(expected));
                    }
                    Log.Warn(line);
                }
                Fail(message);
            }
            if (delete)
            {
                bool deleted = fs.Delete(path, false);
                Assert.True("Deleted", deleted);
                NUnit.Framework.Assert.IsFalse("No longer exists", fs.Exists(path));
            }
        }
Code Example #19
        /// <summary>
        /// Test that we can zero-copy read cached data even without disabling
        /// checksums.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestZeroCopyReadOfCachedData()
        {
            BlockReaderTestUtil.EnableShortCircuitShmTracing();
            BlockReaderTestUtil.EnableBlockReaderFactoryTracing();
            BlockReaderTestUtil.EnableHdfsCachingTracing();
            int  TestFileLength    = BlockSize;
            Path TestPath          = new Path("/a");
            int  RandomSeed        = 23453;
            HdfsConfiguration conf = InitZeroCopyTest();

            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
            string Context = "testZeroCopyReadOfCachedData";

            conf.Set(DFSConfigKeys.DfsClientContext, Context);
            conf.SetLong(DFSConfigKeys.DfsDatanodeMaxLockedMemoryKey,
                         DFSTestUtil.RoundUpToMultiple(TestFileLength,
                             (int)NativeIO.POSIX.GetCacheManipulator().GetOperatingSystemPageSize()));
            MiniDFSCluster cluster = null;
            ByteBuffer     result  = null;
            ByteBuffer     result2 = null;

            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            FsDatasetSpi <object> fsd = cluster.GetDataNodes()[0].GetFSDataset();
            DistributedFileSystem fs  = cluster.GetFileSystem();

            DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, RandomSeed);
            DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
            byte[] original = DFSTestUtil.CalculateFileContentsFromSeed(RandomSeed, TestFileLength);
            // Prior to caching, the file can't be read via zero-copy
            FSDataInputStream fsIn = fs.Open(TestPath);

            try
            {
                result = fsIn.Read(null, TestFileLength / 2, EnumSet.NoneOf <ReadOption>());
                NUnit.Framework.Assert.Fail("expected UnsupportedOperationException");
            }
            catch (NotSupportedException)
            {
                // expected
            }
            // Cache the file
            fs.AddCachePool(new CachePoolInfo("pool1"));
            long directiveId = fs.AddCacheDirective(new CacheDirectiveInfo.Builder()
                                                        .SetPath(TestPath).SetReplication((short)1).SetPool("pool1").Build());
            int numBlocks = (int)Math.Ceiling((double)TestFileLength / BlockSize);

            DFSTestUtil.VerifyExpectedCacheUsage(DFSTestUtil.RoundUpToMultiple(TestFileLength, BlockSize),
                                                 numBlocks, fsd);
            try
            {
                result = fsIn.Read(null, TestFileLength, EnumSet.NoneOf <ReadOption>());
            }
            catch (NotSupportedException)
            {
                NUnit.Framework.Assert.Fail("expected to be able to read cached file via zero-copy");
            }
            Assert.AssertArrayEquals(Arrays.CopyOfRange(original, 0, BlockSize),
                                     ByteBufferToArray(result));
            // Test that files opened after the cache operation has finished
            // still get the benefits of zero-copy (regression test for HDFS-6086)
            FSDataInputStream fsIn2 = fs.Open(TestPath);

            try
            {
                result2 = fsIn2.Read(null, TestFileLength, EnumSet.NoneOf <ReadOption>());
            }
            catch (NotSupportedException)
            {
                NUnit.Framework.Assert.Fail("expected to be able to read cached file via zero-copy");
            }
            Assert.AssertArrayEquals(Arrays.CopyOfRange(original, 0, BlockSize),
                                     ByteBufferToArray(result2));
            fsIn2.ReleaseBuffer(result2);
            fsIn2.Close();
            // check that the replica is anchored
            ExtendedBlock     firstBlock = DFSTestUtil.GetFirstBlock(fs, TestPath);
            ShortCircuitCache cache      = ClientContext.Get(Context, new DFSClient.Conf(conf))
                                               .GetShortCircuitCache();

            WaitForReplicaAnchorStatus(cache, firstBlock, true, true, 1);
            // Uncache the replica
            fs.RemoveCacheDirective(directiveId);
            WaitForReplicaAnchorStatus(cache, firstBlock, false, true, 1);
            fsIn.ReleaseBuffer(result);
            WaitForReplicaAnchorStatus(cache, firstBlock, false, false, 1);
            DFSTestUtil.VerifyExpectedCacheUsage(0, 0, fsd);
            fsIn.Close();
            fs.Close();
            cluster.Shutdown();
        }
Code Example #20
        public virtual void TestClientMmapDisable()
        {
            HdfsConfiguration conf = InitZeroCopyTest();

            conf.SetBoolean(DFSConfigKeys.DfsClientMmapEnabled, false);
            MiniDFSCluster        cluster        = null;
            Path                  TestPath       = new Path("/a");
            int                   TestFileLength = 16385;
            int                   RandomSeed     = 23453;
            string                Context        = "testClientMmapDisable";
            FSDataInputStream     fsIn           = null;
            DistributedFileSystem fs             = null;

            conf.Set(DFSConfigKeys.DfsClientContext, Context);
            try
            {
                // With DFS_CLIENT_MMAP_ENABLED set to false, we should not do memory
                // mapped reads.
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, RandomSeed);
                DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
                fsIn = fs.Open(TestPath);
                try
                {
                    fsIn.Read(null, 1, EnumSet.Of(ReadOption.SkipChecksums));
                    NUnit.Framework.Assert.Fail("expected zero-copy read to fail when client mmaps "
                                                + "were disabled.");
                }
                catch (NotSupportedException)
                {
                }
            }
            finally
            {
                if (fsIn != null)
                {
                    fsIn.Close();
                }
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            fsIn    = null;
            fs      = null;
            cluster = null;
            try
            {
                // Now try again with DFS_CLIENT_MMAP_CACHE_SIZE == 0.  It should work.
                conf.SetBoolean(DFSConfigKeys.DfsClientMmapEnabled, true);
                conf.SetInt(DFSConfigKeys.DfsClientMmapCacheSize, 0);
                conf.Set(DFSConfigKeys.DfsClientContext, Context + ".1");
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, RandomSeed);
                DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
                fsIn = fs.Open(TestPath);
                ByteBuffer buf = fsIn.Read(null, 1, EnumSet.Of(ReadOption.SkipChecksums));
                fsIn.ReleaseBuffer(buf);
                // Test EOF behavior
                IOUtils.SkipFully(fsIn, TestFileLength - 1);
                buf = fsIn.Read(null, 1, EnumSet.Of(ReadOption.SkipChecksums));
                NUnit.Framework.Assert.AreEqual(null, buf);
            }
            finally
            {
                if (fsIn != null)
                {
                    fsIn.Close();
                }
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Code Example #21
        public virtual void TestZeroCopyReads()
        {
            HdfsConfiguration conf    = InitZeroCopyTest();
            MiniDFSCluster    cluster = null;
            Path TestPath             = new Path("/a");
            FSDataInputStream fsIn    = null;
            int        TestFileLength = 3 * BlockSize;
            FileSystem fs             = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, 7567L);
                try
                {
                    DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
                }
                catch (Exception e)
                {
                    NUnit.Framework.Assert.Fail("unexpected InterruptedException during waitReplication: " + e);
                }
                catch (TimeoutException e)
                {
                    NUnit.Framework.Assert.Fail("unexpected TimeoutException during waitReplication: " + e);
                }
                fsIn = fs.Open(TestPath);
                byte[] original = new byte[TestFileLength];
                IOUtils.ReadFully(fsIn, original, 0, TestFileLength);
                fsIn.Close();
                fsIn = fs.Open(TestPath);
                ByteBuffer result = fsIn.Read(null, BlockSize, EnumSet.Of(ReadOption.SkipChecksums));
                NUnit.Framework.Assert.AreEqual(BlockSize, result.Remaining());
                HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
                NUnit.Framework.Assert.AreEqual(BlockSize, dfsIn.GetReadStatistics().GetTotalBytesRead());
                NUnit.Framework.Assert.AreEqual(BlockSize, dfsIn.GetReadStatistics().GetTotalZeroCopyBytesRead());
                Assert.AssertArrayEquals(Arrays.CopyOfRange(original, 0, BlockSize),
                                         ByteBufferToArray(result));
                fsIn.ReleaseBuffer(result);
            }
            finally
            {
                if (fsIn != null)
                {
                    fsIn.Close();
                }
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Code Example #22
        public virtual void Test2GBMmapLimit()
        {
            Assume.AssumeTrue(BlockReaderTestUtil.ShouldTestLargeFiles());
            HdfsConfiguration conf = InitZeroCopyTest();
            long TestFileLength    = 2469605888L;

            conf.Set(DFSConfigKeys.DfsChecksumTypeKey, "NULL");
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, TestFileLength);
            MiniDFSCluster cluster  = null;
            Path           TestPath = new Path("/a");
            string         Context  = "test2GBMmapLimit";

            conf.Set(DFSConfigKeys.DfsClientContext, Context);
            FSDataInputStream fsIn  = null;
            FSDataInputStream fsIn2 = null;
            ByteBuffer        buf1  = null;
            ByteBuffer        buf2  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                DistributedFileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, unchecked((int)(0xB)));
                DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
                fsIn = fs.Open(TestPath);
                buf1 = fsIn.Read(null, 1, EnumSet.Of(ReadOption.SkipChecksums));
                NUnit.Framework.Assert.AreEqual(1, buf1.Remaining());
                fsIn.ReleaseBuffer(buf1);
                buf1 = null;
                fsIn.Seek(2147483640L);
                buf1 = fsIn.Read(null, 1024, EnumSet.Of(ReadOption.SkipChecksums));
                NUnit.Framework.Assert.AreEqual(7, buf1.Remaining());
                NUnit.Framework.Assert.AreEqual(int.MaxValue, buf1.Limit());
                fsIn.ReleaseBuffer(buf1);
                buf1 = null;
                NUnit.Framework.Assert.AreEqual(2147483647L, fsIn.GetPos());
                try
                {
                    buf1 = fsIn.Read(null, 1024, EnumSet.Of(ReadOption.SkipChecksums));
                    NUnit.Framework.Assert.Fail("expected UnsupportedOperationException");
                }
                catch (NotSupportedException)
                {
                    // expected; can't read past the 2GB boundary.
                }
                fsIn.Close();
                fsIn = null;
                // Now create another file with normal-sized blocks, and verify we
                // can read past 2GB
                Path TestPath2 = new Path("/b");
                conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 268435456L);
                DFSTestUtil.CreateFile(fs, TestPath2, 1024 * 1024, TestFileLength, 268435456L,
                                       (short)1, unchecked((int)(0xA)));
                fsIn2 = fs.Open(TestPath2);
                fsIn2.Seek(2147483640L);
                buf2 = fsIn2.Read(null, 1024, EnumSet.Of(ReadOption.SkipChecksums));
                NUnit.Framework.Assert.AreEqual(8, buf2.Remaining());
                NUnit.Framework.Assert.AreEqual(2147483648L, fsIn2.GetPos());
                fsIn2.ReleaseBuffer(buf2);
                buf2 = null;
                buf2 = fsIn2.Read(null, 1024, EnumSet.Of(ReadOption.SkipChecksums));
                NUnit.Framework.Assert.AreEqual(1024, buf2.Remaining());
                NUnit.Framework.Assert.AreEqual(2147484672L, fsIn2.GetPos());
                fsIn2.ReleaseBuffer(buf2);
                buf2 = null;
            }
            finally
            {
                if (buf1 != null)
                {
                    fsIn.ReleaseBuffer(buf1);
                }
                if (buf2 != null)
                {
                    fsIn2.ReleaseBuffer(buf2);
                }
                IOUtils.Cleanup(null, fsIn, fsIn2);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Code Example #23
        /// <summary>
        /// When mark() is used on BufferedInputStream, the request
        /// size on the checksum file system can be small.
        /// </summary>
        /// <remarks>
        /// When mark() is used on BufferedInputStream, the request
        /// size on the checksum file system can be small.  However,
        /// checksum file system currently depends on the request size
        /// &gt;= bytesPerSum to work properly.
        /// </remarks>
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestTruncatedInputBug()
        {
            int           ioBufSize = 512;
            int           fileSize  = ioBufSize * 4;
            int           filePos   = 0;
            Configuration conf      = new Configuration();

            conf.SetInt("io.file.buffer.size", ioBufSize);
            FileSystem fileSys = FileSystem.GetLocal(conf);

            try
            {
                // First create a test input file.
                Path testFile = new Path(TestRootDir, "HADOOP-1489");
                WriteFile(fileSys, testFile, fileSize);
                Assert.True(fileSys.Exists(testFile));
                Assert.True(fileSys.GetFileStatus(testFile).GetLen() == fileSize);
                // Now read the file for ioBufSize bytes
                FSDataInputStream @in = fileSys.Open(testFile, ioBufSize);
                // seek beyond data buffered by open
                filePos += ioBufSize * 2 + (ioBufSize - 10);
                @in.Seek(filePos);
                // read 4 more bytes before marking
                for (int i = 0; i < 4; ++i)
                {
                    if (@in.Read() == -1)
                    {
                        break;
                    }
                    ++filePos;
                }
                // Now set mark() to trigger the bug
                // NOTE: in the fixed code, mark() does nothing (not supported) and
                //   hence won't trigger this bug.
                @in.Mark(1);
                System.Console.Out.WriteLine("MARKED");
                // Try to read the rest
                while (filePos < fileSize)
                {
                    if (@in.Read() == -1)
                    {
                        break;
                    }
                    ++filePos;
                }
                @in.Close();
                System.Console.Out.WriteLine("Read " + filePos + " bytes." + " file size=" + fileSize
                                             );
                Assert.True(filePos == fileSize);
            }
            finally
            {
                try
                {
                    fileSys.Close();
                }
                catch (Exception)
                {
                }
            }
        }
Code Example #24
File: AvroFSInput.cs Project: orf53975/hadoop.net
 /// <summary>
 /// Construct given an <see cref="FSDataInputStream"/> and its length.
 /// </summary>
 public AvroFSInput(FSDataInputStream @in, long len)
 {
     this.stream = @in;
     this.len    = len;
 }
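
This overload is useful when a stream is already open and its length is known, avoiding the GetFileStatus() round trip made by the (FileContext, Path) constructor in Code Example #3. A hedged sketch:

    // Reuse an existing handle; fc and p as in Code Example #3.
    FileStatus status = fc.GetFileStatus(p);
    AvroFSInput input = new AvroFSInput(fc.Open(p), status.GetLen());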
Code Example #25
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestReportChecksumFailure()
        {
            @base.Mkdirs();
            Assert.True(@base.Exists() && @base.IsDirectory());
            FilePath dir1 = new FilePath(@base, "dir1");
            FilePath dir2 = new FilePath(dir1, "dir2");

            dir2.Mkdirs();
            Assert.True(dir2.Exists() && FileUtil.CanWrite(dir2));
            string             dataFileName = "corruptedData";
            Path               dataPath     = new Path(new FilePath(dir2, dataFileName).ToURI());
            Path               checksumPath = fileSys.GetChecksumFile(dataPath);
            FSDataOutputStream fsdos        = fileSys.Create(dataPath);

            try
            {
                fsdos.WriteUTF("foo");
            }
            finally
            {
                fsdos.Close();
            }
            Assert.True(fileSys.PathToFile(dataPath).Exists());
            long dataFileLength = fileSys.GetFileStatus(dataPath).GetLen();

            Assert.True(dataFileLength > 0);
            // check that the checksum file is created and not empty:
            Assert.True(fileSys.PathToFile(checksumPath).Exists());
            long checksumFileLength = fileSys.GetFileStatus(checksumPath).GetLen();

            Assert.True(checksumFileLength > 0);
            // this is a hack to force the #reportChecksumFailure() method to stop
            // climbing up at the 'base' directory and use 'dir1/bad_files' as the
            // corrupted files storage:
            FileUtil.SetWritable(@base, false);
            FSDataInputStream dataFsdis     = fileSys.Open(dataPath);
            FSDataInputStream checksumFsdis = fileSys.Open(checksumPath);
            bool retryIsNecessary = fileSys.ReportChecksumFailure(dataPath, dataFsdis, 0,
                                                                  checksumFsdis, 0);

            Assert.True(!retryIsNecessary);
            // the data file should be moved:
            Assert.True(!fileSys.PathToFile(dataPath).Exists());
            // the checksum file should be moved:
            Assert.True(!fileSys.PathToFile(checksumPath).Exists());
            // check that the files exist in the new location where they were moved:
            FilePath[] dir1files = dir1.ListFiles(new _FileFilter_352());
            Assert.True(dir1files != null);
            Assert.True(dir1files.Length == 1);
            FilePath badFilesDir = dir1files[0];

            FilePath[] badFiles = badFilesDir.ListFiles();
            Assert.True(badFiles != null);
            Assert.True(badFiles.Length == 2);
            bool dataFileFound     = false;
            bool checksumFileFound = false;

            foreach (FilePath badFile in badFiles)
            {
                if (badFile.GetName().StartsWith(dataFileName))
                {
                    Assert.True(dataFileLength == badFile.Length());
                    dataFileFound = true;
                }
                else
                {
                    if (badFile.GetName().Contains(dataFileName + ".crc"))
                    {
                        Assert.True(checksumFileLength == badFile.Length());
                        checksumFileFound = true;
                    }
                }
            }
            Assert.True(dataFileFound);
            Assert.True(checksumFileFound);
        }
Code Example #26
File: ChecksumFs.cs Project: orf53975/hadoop.net
 /// <summary>Report a checksum error to the file system.</summary>
 /// <param name="f">the file name containing the error</param>
 /// <param name="in">the stream open on the file</param>
 /// <param name="inPos">the position of the beginning of the bad data in the file</param>
 /// <param name="sums">the stream open on the checksum file</param>
 /// <param name="sumsPos">
 /// the position of the beginning of the bad data in the
 /// checksum file
 /// </param>
 /// <returns>true if a retry is necessary</returns>
 public virtual bool ReportChecksumFailure(Path f, FSDataInputStream @in, long inPos,
                                           FSDataInputStream sums, long sumsPos)
 {
     return(false);
 }
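
This base implementation always reports that no retry is needed; a similar implementation on the local file system (exercised in Code Example #25) also moves the corrupted data and checksum files aside. A hedged sketch of how a caller might honor the return value (the retry policy itself is an assumption, not part of this class):

    bool retry = fs.ReportChecksumFailure(f, @in, inPos, sums, sumsPos);
    if (retry)
    {
        @in.Seek(inPos); // re-attempt the read from the start of the bad range
    }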
Code Example #27
        public virtual void TestZeroCopyMmapCache()
        {
            HdfsConfiguration conf           = InitZeroCopyTest();
            MiniDFSCluster    cluster        = null;
            Path              TestPath       = new Path("/a");
            int               TestFileLength = 5 * BlockSize;
            int               RandomSeed     = 23453;
            string            Context        = "testZeroCopyMmapCacheContext";
            FSDataInputStream fsIn           = null;

            ByteBuffer[]          results = new ByteBuffer[] { null, null, null, null };
            DistributedFileSystem fs      = null;

            conf.Set(DFSConfigKeys.DfsClientContext, Context);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            fs = cluster.GetFileSystem();
            DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, RandomSeed);
            try
            {
                DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
            }
            catch (Exception e)
            {
                NUnit.Framework.Assert.Fail("unexpected InterruptedException during waitReplication: " + e);
            }
            catch (TimeoutException e)
            {
                NUnit.Framework.Assert.Fail("unexpected TimeoutException during waitReplication: " + e);
            }
            fsIn = fs.Open(TestPath);
            byte[] original = new byte[TestFileLength];
            IOUtils.ReadFully(fsIn, original, 0, TestFileLength);
            fsIn.Close();
            fsIn = fs.Open(TestPath);
            ShortCircuitCache cache = ClientContext.Get(Context, new DFSClient.Conf(conf))
                                          .GetShortCircuitCache();

            cache.Accept(new TestEnhancedByteBufferAccess.CountingVisitor(0, 5, 5, 0));
            results[0] = fsIn.Read(null, BlockSize, EnumSet.Of(ReadOption.SkipChecksums));
            fsIn.Seek(0);
            results[1] = fsIn.Read(null, BlockSize, EnumSet.Of(ReadOption.SkipChecksums));
            // The mmap should be of the first block of the file.
            ExtendedBlock firstBlock = DFSTestUtil.GetFirstBlock(fs, TestPath);

            cache.Accept(new _CacheVisitor_373(firstBlock));
            // The replica should not yet be evictable, since we have it open.
            // Read more blocks.
            results[2] = fsIn.Read(null, BlockSize, EnumSet.Of(ReadOption.SkipChecksums));
            results[3] = fsIn.Read(null, BlockSize, EnumSet.Of(ReadOption.SkipChecksums));
            // we should have 3 mmaps, 1 evictable
            cache.Accept(new TestEnhancedByteBufferAccess.CountingVisitor(3, 5, 2, 0));
            // After we close the cursors, the mmaps should be evictable for
            // a brief period of time.  Then, they should be closed (we're
            // using a very quick timeout)
            foreach (ByteBuffer buffer in results)
            {
                if (buffer != null)
                {
                    fsIn.ReleaseBuffer(buffer);
                }
            }
            fsIn.Close();
            GenericTestUtils.WaitFor(new _Supplier_407(cache), 10, 60000);
            cache.Accept(new TestEnhancedByteBufferAccess.CountingVisitor(0, -1, -1, -1));
            fs.Close();
            cluster.Shutdown();
        }