Example #1
        // test pread can survive datanode restarts
        /// <exception cref="System.IO.IOException"/>
        private void DatanodeRestartTest(MiniDFSCluster cluster, FileSystem fileSys, Path
                                         name)
        {
            // skip this test if using simulated storage since simulated blocks
            // don't survive datanode restarts.
            if (simulatedStorage)
            {
                return;
            }
            int numBlocks = 1;

            NUnit.Framework.Assert.IsTrue(numBlocks <= DFSConfigKeys.DfsClientMaxBlockAcquireFailuresDefault
                                          );
            byte[] expected = new byte[numBlocks * blockSize];
            Random rand     = new Random(seed);

            rand.NextBytes(expected);
            byte[]            actual = new byte[numBlocks * blockSize];
            FSDataInputStream stm    = fileSys.Open(name);

            // read a block and get block locations cached as a result
            stm.ReadFully(0, actual);
            CheckAndEraseData(actual, 0, expected, "Pread Datanode Restart Setup");
            // restart all datanodes. it is expected that they will
            // restart on different ports, hence, cached block locations
            // will no longer work.
            NUnit.Framework.Assert.IsTrue(cluster.RestartDataNodes());
            cluster.WaitActive();
            // verify the block can be read again using the same InputStream
            // (via re-fetching of block locations from namenode). there is a
            // 3 sec sleep in chooseDataNode(), which can be shortened for
            // this test if configurable.
            stm.ReadFully(0, actual);
            CheckAndEraseData(actual, 0, expected, "Pread Datanode Restart Test");
        }
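A minimal driver sketch for the helper above, not part of the original example: it assumes the MiniDFSCluster fixture style visible in the later examples, and the file-creation step is elided because the original setup is not shown.

        // Sketch only (assumed fixture): drive DatanodeRestartTest end to end.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
        cluster.WaitActive();
        FileSystem fs = cluster.GetFileSystem();
        Path name = new Path("/preadtest.dat");
        // ... write numBlocks * blockSize random bytes to 'name' here, then:
        DatanodeRestartTest(cluster, fs, name);
        cluster.Shutdown();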
Example #2
        /// <summary>test seek</summary>
        /// <exception cref="System.IO.IOException"/>
        internal static void VerifySeek(FileSystem fs, Path p, long offset, long length,
                                        byte[] buf, byte[] expected)
        {
            long remaining = length - offset;
            long @checked  = 0;

            Log.Info("XXX SEEK: offset=" + offset + ", remaining=" + remaining);
            TestWebHDFS.Ticker t = new TestWebHDFS.Ticker("SEEK", "offset=%d, remaining=%d",
                                                          offset, remaining);
            FSDataInputStream @in = fs.Open(p, 64 << 10);

            @in.Seek(offset);
            while (remaining > 0)
            {
                t.Tick(@checked, "offset=%d, remaining=%d", offset, remaining);
                int n = (int)Math.Min(remaining, buf.Length);
                @in.ReadFully(buf, 0, n);
                CheckData(offset, remaining, n, buf, expected);
                offset    += n;
                remaining -= n;
                @checked  += n;
            }
            @in.Close();
            t.End(@checked);
        }
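A hedged usage sketch for VerifySeek; 'fs', 'p' and 'expected' (the file's complete contents) are assumed fixture values not shown in the original:

        // Sketch: check seek-then-sequential-read from a few starting offsets.
        byte[] buf = new byte[64 << 10];
        VerifySeek(fs, p, 0L, expected.Length, buf, expected);
        VerifySeek(fs, p, expected.Length / 3, expected.Length, buf, expected);
        VerifySeek(fs, p, expected.Length - 1, expected.Length, buf, expected);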
Example #3
        /// <exception cref="System.IO.IOException"/>
        private void CheckFile(FileSystem fileSys, Path name)
        {
            BlockLocation[] locations = fileSys.GetFileBlockLocations(fileSys.GetFileStatus(name
                                                                                            ), 0, fileSize);
            NUnit.Framework.Assert.AreEqual("Number of blocks", fileSize, locations.Length);
            FSDataInputStream stm = fileSys.Open(name);

            byte[] expected = new byte[fileSize];
            if (simulatedStorage)
            {
                for (int i = 0; i < expected.Length; ++i)
                {
                    expected[i] = SimulatedFSDataset.DefaultDatabyte;
                }
            }
            else
            {
                Random rand = new Random(seed);
                rand.NextBytes(expected);
            }
            // do a sanity check. Read the file
            byte[] actual = new byte[fileSize];
            stm.ReadFully(0, actual);
            CheckAndEraseData(actual, 0, expected, "Read Sanity Test");
            stm.Close();
        }
Example #4
            /// <exception cref="System.IO.IOException"/>
            public ChecksumFSInputChecker(ChecksumFileSystem fs, Path file, int bufferSize)
                : base(file, fs.GetFileStatus(file).GetReplication())
            {
                this.datas = fs.GetRawFileSystem().Open(file, bufferSize);
                this.fs    = fs;
                Path sumFile = fs.GetChecksumFile(file);

                try
                {
                    int sumBufferSize = fs.GetSumBufferSize(fs.GetBytesPerSum(), bufferSize);
                    sums = fs.GetRawFileSystem().Open(sumFile, sumBufferSize);
                    byte[] version = new byte[ChecksumVersion.Length];
                    sums.ReadFully(version);
                    if (!Arrays.Equals(version, ChecksumVersion))
                    {
                        throw new IOException("Not a checksum file: " + sumFile);
                    }
                    this.bytesPerSum = sums.ReadInt();
                    Set(fs.verifyChecksum, DataChecksum.NewCrc32(), bytesPerSum, 4);
                }
                catch (FileNotFoundException)
                {
                    // quietly ignore
                    Set(fs.verifyChecksum, null, 1, 0);
                }
                catch (IOException e)
                {
                    // loudly ignore
                    Log.Warn("Problem opening checksum file: " + file + ".  Ignoring exception: ", e);
                    Set(fs.verifyChecksum, null, 1, 0);
                }
            }
Example #5
        /// <exception cref="System.IO.IOException"/>
        internal virtual void ReadFile(FileSystem fs, Path path, int fileLen)
        {
            byte[]            arr = new byte[fileLen];
            FSDataInputStream @in = fs.Open(path);

            @in.ReadFully(arr);
            @in.Close();
        }
Example #6
 /// <summary>
 /// Verify that the read at a specific offset in a stream
 /// matches that expected
 /// </summary>
 /// <param name="stm">stream</param>
 /// <param name="fileContents">original file contents</param>
 /// <param name="seekOff">seek offset</param>
 /// <param name="toRead">number of bytes to read</param>
 /// <exception cref="System.IO.IOException">IO problems</exception>
 public static void VerifyRead(FSDataInputStream stm, byte[] fileContents, int seekOff
                               , int toRead)
 {
     byte[] @out = new byte[toRead];
     stm.Seek(seekOff);
     stm.ReadFully(@out);
     byte[] expected = Arrays.CopyOfRange(fileContents, seekOff, seekOff + toRead);
     CompareByteArrays(expected, @out, toRead);
 }
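A hedged usage sketch for VerifyRead; 'fs', 'path' and 'fileContents' are assumed fixture values, with 'fileContents' holding the file's full contents:

        // Sketch: verify reads at the start, middle and tail of the file.
        FSDataInputStream stm = fs.Open(path);
        try
        {
            VerifyRead(stm, fileContents, 0, 1024);
            VerifyRead(stm, fileContents, fileContents.Length / 2, 1024);
            VerifyRead(stm, fileContents, fileContents.Length - 1024, 1024);
        }
        finally
        {
            stm.Close();
        }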
Example #7
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestSeek()
        {
            Path dir = new Path("/test/testSeek");

            NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dir));
            {
                //test zero file size
                Path zero = new Path(dir, "zero");
                fs.Create(zero).Close();
                int count             = 0;
                FSDataInputStream @in = fs.Open(zero);
                for (; @in.Read() != -1; count++)
                {
                }
                @in.Close();
                NUnit.Framework.Assert.AreEqual(0, count);
            }
            byte[] mydata = new byte[1 << 20];
            new Random().NextBytes(mydata);
            Path p = new Path(dir, "file");
            FSDataOutputStream @out = fs.Create(p, false, 4096, (short)3, 1L << 17);

            @out.Write(mydata, 0, mydata.Length);
            @out.Close();
            int one_third = mydata.Length / 3;
            int two_third = one_third * 2;
            {
                //test seek
                int               offset = one_third;
                int               len    = mydata.Length - offset;
                byte[]            buf    = new byte[len];
                FSDataInputStream @in    = fs.Open(p);
                @in.Seek(offset);
                //read all remaining data
                @in.ReadFully(buf);
                @in.Close();
                for (int i = 0; i < buf.Length; i++)
                {
                    NUnit.Framework.Assert.AreEqual("Position " + i + ", offset=" + offset + ", length="
                                                    + len, mydata[i + offset], buf[i]);
                }
            }
            {
                //test position read (read the data after the two_third location)
                int               offset = two_third;
                int               len    = mydata.Length - offset;
                byte[]            buf    = new byte[len];
                FSDataInputStream @in    = fs.Open(p);
                @in.ReadFully(offset, buf);
                @in.Close();
                for (int i = 0; i < buf.Length; i++)
                {
                    NUnit.Framework.Assert.AreEqual("Position " + i + ", offset=" + offset + ", length="
                                                    + len, mydata[i + offset], buf[i]);
                }
            }
        }
Example #8
        //
        // verify that the data written is sane
        //
        /// <exception cref="System.IO.IOException"/>
        private static void CheckFile(FileSystem fileSys, Path name, int repl, int numblocks
                                      , int filesize, long seed)
        {
            bool done    = false;
            int  attempt = 0;
            long len     = fileSys.GetFileStatus(name).GetLen();

            NUnit.Framework.Assert.IsTrue(name + " should be of size " + filesize + " but found to be of size "
                                          + len, len == filesize);
            // wait till all full blocks are confirmed by the datanodes.
            while (!done)
            {
                attempt++;
                try
                {
                    Sharpen.Thread.Sleep(1000);
                }
                catch (Exception)
                {
                    // ignore interruption and keep polling
                }
                done = true;
                BlockLocation[] locations = fileSys.GetFileBlockLocations(fileSys.GetFileStatus(name
                                                                                                ), 0, filesize);
                if (locations.Length < numblocks)
                {
                    if (attempt > 100)
                    {
                        System.Console.Out.WriteLine("File " + name + " has only " + locations.Length + " blocks, "
                                                     + " but is expected to have " + numblocks + " blocks.");
                    }
                    done = false;
                    continue;
                }
                for (int idx = 0; idx < locations.Length; idx++)
                {
                    if (locations[idx].GetHosts().Length < repl)
                    {
                        if (attempt > 100)
                        {
                            System.Console.Out.WriteLine("File " + name + " has " + locations.Length + " blocks: "
                                                         + " The " + idx + " block has only " + locations[idx].GetHosts().Length + " replicas but is expected to have "
                                                         + repl + " replicas.");
                        }
                        done = false;
                        break;
                    }
                }
            }
            FSDataInputStream stm = fileSys.Open(name);

            byte[] expected = AppendTestUtil.RandomBytes(seed, filesize);
            // do a sanity check. Read the file
            byte[] actual = new byte[filesize];
            stm.ReadFully(0, actual);
            CheckData(actual, 0, expected, "Read 1");
        }
Example #9
        /// <exception cref="System.Exception"/>
        private void CheckFile(Path name)
        {
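            // Note: 'actual' and 'expected' are presumably instance fields
            // populated by the surrounding fixture; they are not declared here.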
            FSDataInputStream stm = fileSys.Open(name);

            // do a sanity check. Read the file
            stm.ReadFully(0, actual);
            CheckAndEraseData(actual, 0, expected, "Read Sanity Test");
            stm.Close();
            // do a sanity check. Get the file checksum
            fileSys.GetFileChecksum(name);
        }
Example #10
        /// <summary>Reads from file and makes sure that it matches the pattern</summary>
        /// <param name="fs">a reference to FileSystem</param>
        /// <param name="name">Path of a file</param>
        /// <param name="fileSize">size of the file</param>
        /// <exception cref="System.IO.IOException">in case of errors</exception>
        internal static void CheckFullFile(FileSystem fs, Path name, long fileSize)
        {
            // read in chunks of 128 MB
            int readSize = pattern.Length * 16 * 1024 * 1024;

            if (readSize > int.MaxValue)
            {
                throw new IOException("A single read is too large " + readSize);
            }
            byte[] b           = new byte[readSize];
            long   bytesToRead = fileSize;

            byte[] compb = new byte[readSize];
            // buffer with correct data for comparison
            // initialize compare buffer
            for (int j = 0; j < readSize; j++)
            {
                compb[j] = pattern[j % pattern.Length];
            }
            FSDataInputStream stm = fs.Open(name);

            while (bytesToRead > 0)
            {
                // how many bytes we are reading in this iteration
                int thisread = (int)Math.Min(readSize, bytesToRead);
                stm.ReadFully(b, 0, thisread);
                // verify data read
                if (thisread == readSize)
                {
                    NUnit.Framework.Assert.IsTrue("file is corrupted at or after byte " + (fileSize -
                                                                                           bytesToRead), Arrays.Equals(b, compb));
                }
                else
                {
                    // b was only partially filled by last read
                    for (int k = 0; k < thisread; k++)
                    {
                        NUnit.Framework.Assert.IsTrue("file is corrupted at or after byte " + (fileSize -
                                                                                               bytesToRead), b[k] == compb[k]);
                    }
                }
                Log.Debug("Before update: to read: " + bytesToRead + "; read already: " + thisread
                          );
                bytesToRead -= thisread;
                Log.Debug("After  update: to read: " + bytesToRead + "; read already: " + thisread
                          );
            }
            stm.Close();
        }
Example #11
        /// <summary>Read in "length" bytes and convert them to an ASCII string</summary>
        /// <param name="fs">filesystem</param>
        /// <param name="path">path to read</param>
        /// <param name="length">number of bytes to read</param>
        /// <returns>the bytes read and converted to a string</returns>
        /// <exception cref="System.IO.IOException">IO problems</exception>
        public static string ReadBytesToString(FileSystem fs, Path path, int length)
        {
            FSDataInputStream @in = fs.Open(path);

            try
            {
                byte[] buf = new byte[length];
                @in.ReadFully(0, buf);
                return(ToChar(buf));
            }
            finally
            {
                @in.Close();
            }
        }
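A hypothetical call site for ReadBytesToString; the path is illustrative and 'fs' is assumed to come from the surrounding fixture:

        // Sketch: read the first 16 bytes of a file as text.
        string head = ReadBytesToString(fs, new Path("/test/bytesToString"), 16);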
Example #12
        /// <exception cref="System.IO.IOException"/>
        private void WriteFile(FileSystem fileSys, Path name)
        {
            int replication = 3;
            // We need > 1 block to test out the hedged reads.
            // first create an empty file; the real 12-block file is created below
            DataOutputStream stm = fileSys.Create(name, true, 4096, (short)replication, blockSize
                                                  );

            // test empty file open and read
            stm.Close();
            FSDataInputStream @in = fileSys.Open(name);

            byte[] buffer = new byte[12 * blockSize];
            @in.ReadFully(0, buffer, 0, 0);
            IOException res = null;

            try
            {
                // read beyond the end of the file
                @in.ReadFully(0, buffer, 0, 1);
            }
            catch (IOException e)
            {
                // should throw an exception
                res = e;
            }
            NUnit.Framework.Assert.IsTrue("Error reading beyond file boundary.", res != null);
            @in.Close();
            if (!fileSys.Delete(name, true))
            {
                NUnit.Framework.Assert.IsTrue("Cannot delete file", false);
            }
            // now create the real file
            DFSTestUtil.CreateFile(fileSys, name, 12 * blockSize, 12 * blockSize, blockSize,
                                   (short)replication, seed);
        }
Example #13
        /// <exception cref="System.IO.IOException"/>
        private byte[] ReadFile(Path file, long numBytes)
        {
            byte[]            data = new byte[(int)numBytes];
            FSDataInputStream @in  = fs.Open(file);

            try
            {
                @in.ReadFully(data);
            }
            finally
            {
                IOUtils.Cleanup(Log, @in);
            }
            return(data);
        }
Example #14
        /// <exception cref="System.IO.IOException"/>
        private void VerifyFile(FileSystem dfs, Path filepath, byte[] actual, int size)
        {
            AppendTestUtil.Log.Info("Lease for file " + filepath + " is recovered. " + "Validating its contents now..."
                                    );
            // verify that file-size matches
            NUnit.Framework.Assert.IsTrue("File should be " + size + " bytes, but is actually "
                                          + " found to be " + dfs.GetFileStatus(filepath).GetLen() + " bytes", dfs.GetFileStatus
                                              (filepath).GetLen() == size);
            // verify that there is enough data to read.
            System.Console.Out.WriteLine("File size is good. Now validating sizes from datanodes..."
                                         );
            FSDataInputStream stmin = dfs.Open(filepath);

            stmin.ReadFully(0, actual, 0, size);
            stmin.Close();
        }
Example #15
        /// <exception cref="System.IO.IOException"/>
        public static void CheckFullFile(FileSystem fs, Path name, int len, byte[] compareContent
                                         , string message, bool checkFileStatus)
        {
            if (checkFileStatus)
            {
                FileStatus status = fs.GetFileStatus(name);
                NUnit.Framework.Assert.AreEqual("len=" + len + " but status.getLen()=" + status.GetLen
                                                    (), len, status.GetLen());
            }
            FSDataInputStream stm = fs.Open(name);

            byte[] actual = new byte[len];
            stm.ReadFully(0, actual);
            CheckData(actual, 0, compareContent, message);
            stm.Close();
        }
Example #16
        /// <exception cref="System.Exception"/>
        public virtual void TestFadviseSkippedForSmallReads()
        {
            // start a cluster
            Log.Info("testFadviseSkippedForSmallReads");
            tracker.Clear();
            Configuration conf = new HdfsConfiguration();

            conf.SetBoolean(DFSConfigKeys.DfsDatanodeDropCacheBehindReadsKey, true);
            conf.SetBoolean(DFSConfigKeys.DfsDatanodeDropCacheBehindWritesKey, true);
            MiniDFSCluster    cluster     = null;
            string            TestPath    = "/test";
            int               TestPathLen = MaxTestFileLen;
            FSDataInputStream fis         = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                FileSystem fs = cluster.GetFileSystem();
                // create new file
                CreateHdfsFile(fs, new Path(TestPath), TestPathLen, null);
                // Since the DataNode was configured with drop-behind, and we didn't
                // specify any policy, we should have done drop-behind.
                ExtendedBlock block = cluster.GetNameNode().GetRpcServer().GetBlockLocations(TestPath
                                                                                             , 0, long.MaxValue).Get(0).GetBlock();
                string fadvisedFileName         = cluster.GetBlockFile(0, block).GetName();
                TestCachingStrategy.Stats stats = tracker.GetStats(fadvisedFileName);
                stats.AssertDroppedInRange(0, TestPathLen - WritePacketSize);
                stats.Clear();
                stats.AssertNotDroppedInRange(0, TestPathLen);
                // read file
                fis = fs.Open(new Path(TestPath));
                byte[] buf = new byte[17];
                fis.ReadFully(4096, buf, 0, buf.Length);
                // we should not have dropped anything because of the small read.
                stats = tracker.GetStats(fadvisedFileName);
                stats.AssertNotDroppedInRange(0, TestPathLen - WritePacketSize);
            }
            finally
            {
                IOUtils.Cleanup(null, fis);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #17
        /// <summary>
        /// Check file content, reading as user <paramref name="readingUser"/>.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        internal static void CheckFileContent(URI uri, Path name, byte[] expected, int readOffset
                                              , string readingUser, Configuration conf, bool legacyShortCircuitFails)
        {
            // Ensure short circuit is enabled
            DistributedFileSystem fs = GetFileSystem(readingUser, uri, conf);
            ClientContext         getClientContext = ClientContext.GetFromConf(conf);

            if (legacyShortCircuitFails)
            {
                NUnit.Framework.Assert.IsFalse(getClientContext.GetDisableLegacyBlockReaderLocal(
                                                   ));
            }
            FSDataInputStream stm = fs.Open(name);

            byte[] actual = new byte[expected.Length - readOffset];
            stm.ReadFully(readOffset, actual);
            CheckData(actual, readOffset, expected, "Read 2");
            stm.Close();
            // Now read using a different API.
            actual = new byte[expected.Length - readOffset];
            stm    = fs.Open(name);
            IOUtils.SkipFully(stm, readOffset);
            //Read a small number of bytes first.
            int nread = stm.Read(actual, 0, 3);

            nread += stm.Read(actual, nread, 2);
            //Read across chunk boundary
            nread += stm.Read(actual, nread, 517);
            CheckData(actual, readOffset, expected, nread, "A few bytes");
            //Now read rest of it
            while (nread < actual.Length)
            {
                int nbytes = stm.Read(actual, nread, actual.Length - nread);
                if (nbytes < 0)
                {
                    throw new EOFException("End of file reached before reading fully.");
                }
                nread += nbytes;
            }
            CheckData(actual, readOffset, expected, "Read 3");
            if (legacyShortCircuitFails)
            {
                NUnit.Framework.Assert.IsTrue(getClientContext.GetDisableLegacyBlockReaderLocal()
                                              );
            }
            stm.Close();
        }
Example #18
        /// <exception cref="System.Exception"/>
        private byte[] ReadFile(Path inputPath, long len)
        {
            FSDataInputStream fsIn = null;

            try
            {
                fsIn = fs.Open(inputPath);
                // state data will not be that "long"
                byte[] data = new byte[(int)len];
                fsIn.ReadFully(data);
                return(data);
            }
            finally
            {
                IOUtils.Cleanup(Log, fsIn);
            }
        }
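A sketch of a round-trip check built on ReadFile above; 'inputPath' and 'expectedBytes' are assumed to be prepared by the calling test:

        // Sketch: read the file back and compare with what was written.
        byte[] data = ReadFile(inputPath, expectedBytes.Length);
        NUnit.Framework.Assert.IsTrue("round-trip mismatch",
                                      Arrays.Equals(expectedBytes, data));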
Example #19
        /// <exception cref="System.IO.IOException"/>
        public static JobSplit.TaskSplitMetaInfo[] ReadSplitMetaInfo(JobID jobId, FileSystem
                                                                     fs, Configuration conf, Path jobSubmitDir)
        {
            long maxMetaInfoSize = conf.GetLong(MRJobConfig.SplitMetainfoMaxsize, MRJobConfig
                                                .DefaultSplitMetainfoMaxsize);
            Path       metaSplitFile = JobSubmissionFiles.GetJobSplitMetaFile(jobSubmitDir);
            string     jobSplitFile  = JobSubmissionFiles.GetJobSplitFile(jobSubmitDir).ToString();
            FileStatus fStatus       = fs.GetFileStatus(metaSplitFile);

            if (maxMetaInfoSize > 0 && fStatus.GetLen() > maxMetaInfoSize)
            {
                throw new IOException("Split metadata size exceeded " + maxMetaInfoSize + ". Aborting job "
                                      + jobId);
            }
            FSDataInputStream @in = fs.Open(metaSplitFile);

            byte[] header = new byte[JobSplit.MetaSplitFileHeader.Length];
            @in.ReadFully(header);
            if (!Arrays.Equals(JobSplit.MetaSplitFileHeader, header))
            {
                throw new IOException("Invalid header on split file");
            }
            int vers = WritableUtils.ReadVInt(@in);

            if (vers != JobSplit.MetaSplitVersion)
            {
                @in.Close();
                throw new IOException("Unsupported split version " + vers);
            }
            int numSplits = WritableUtils.ReadVInt(@in);

            //TODO: check for insane values
            JobSplit.TaskSplitMetaInfo[] allSplitMetaInfo = new JobSplit.TaskSplitMetaInfo[numSplits
                                                            ];
            for (int i = 0; i < numSplits; i++)
            {
                JobSplit.SplitMetaInfo splitMetaInfo = new JobSplit.SplitMetaInfo();
                splitMetaInfo.ReadFields(@in);
                JobSplit.TaskSplitIndex splitIndex = new JobSplit.TaskSplitIndex(jobSplitFile, splitMetaInfo
                                                                                 .GetStartOffset());
                allSplitMetaInfo[i] = new JobSplit.TaskSplitMetaInfo(splitIndex, splitMetaInfo.GetLocations
                                                                         (), splitMetaInfo.GetInputDataLength());
            }
            @in.Close();
            return(allSplitMetaInfo);
        }
        /// <exception cref="System.Exception"/>
        private byte[] GetFileContentsUsingDfs(string fileName, int len)
        {
            FSDataInputStream @in = hdfs.Open(new Path(fileName));

            byte[] ret = new byte[len];
            @in.ReadFully(ret);
            try
            {
                @in.ReadByte();
                NUnit.Framework.Assert.Fail("expected end of file");
            }
            catch (EOFException)
            {
                // expected; unfortunately there is no associated message to check
            }
            @in.Close();
            return(ret);
        }
Example #21
        public virtual void TestConcatNotCompleteBlock()
        {
            long trgFileLen = blockSize * 3;
            long srcFileLen = blockSize * 3 + 20;
            // block at the end - not full
            // create first file
            string name1     = "/trg";
            string name2     = "/src";
            Path   filePath1 = new Path(name1);

            DFSTestUtil.CreateFile(dfs, filePath1, trgFileLen, ReplFactor, 1);
            HdfsFileStatus fStatus = nn.GetFileInfo(name1);
            long           fileLen = fStatus.GetLen();

            NUnit.Framework.Assert.AreEqual(fileLen, trgFileLen);
            //read the file
            FSDataInputStream stm = dfs.Open(filePath1);

            byte[] byteFile1 = new byte[(int)trgFileLen];
            stm.ReadFully(0, byteFile1);
            stm.Close();
            LocatedBlocks lb1       = nn.GetBlockLocations(name1, 0, trgFileLen);
            Path          filePath2 = new Path(name2);

            DFSTestUtil.CreateFile(dfs, filePath2, srcFileLen, ReplFactor, 1);
            fStatus = nn.GetFileInfo(name2);
            fileLen = fStatus.GetLen();
            NUnit.Framework.Assert.AreEqual(srcFileLen, fileLen);
            // read the file
            stm = dfs.Open(filePath2);
            byte[] byteFile2 = new byte[(int)srcFileLen];
            stm.ReadFully(0, byteFile2);
            stm.Close();
            LocatedBlocks lb2 = nn.GetBlockLocations(name2, 0, srcFileLen);

            System.Console.Out.WriteLine("trg len=" + trgFileLen + "; src len=" + srcFileLen);
            // move the blocks
            dfs.Concat(filePath1, new Path[] { filePath2 });
            long totalLen = trgFileLen + srcFileLen;

            fStatus = nn.GetFileInfo(name1);
            fileLen = fStatus.GetLen();
            // read the resulting file
            stm = dfs.Open(filePath1);
            byte[] byteFileConcat = new byte[(int)fileLen];
            stm.ReadFully(0, byteFileConcat);
            stm.Close();
            LocatedBlocks lbConcat = nn.GetBlockLocations(name1, 0, fileLen);

            //verifications
            // 1. number of blocks
            NUnit.Framework.Assert.AreEqual(lbConcat.LocatedBlockCount(), lb1.LocatedBlockCount
                                                () + lb2.LocatedBlockCount());
            // 2. file lengths
            System.Console.Out.WriteLine("file1 len=" + fileLen + "; total len=" + totalLen);
            NUnit.Framework.Assert.AreEqual(fileLen, totalLen);
            // 3. removal of the src file
            fStatus = nn.GetFileInfo(name2);
            NUnit.Framework.Assert.IsNull("File " + name2 + " still exists", fStatus);
            // file shouldn't exist
            // 4. content
            CheckFileContent(byteFileConcat, new byte[][] { byteFile1, byteFile2 });
        }
Example #22
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestHandleTruncatedBlockFile()
        {
            MiniDFSCluster    cluster = null;
            HdfsConfiguration conf    = new HdfsConfiguration();

            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
            conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, "/tmp/testHandleTruncatedBlockFile._PORT"
                     );
            conf.Set(DFSConfigKeys.DfsChecksumTypeKey, "CRC32C");
            Path TestPath          = new Path("/a");
            Path TestPath2         = new Path("/b");
            long RandomSeed        = 4567L;
            long RandomSeed2       = 4568L;
            FSDataInputStream fsIn = null;
            int TestLength         = 3456;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                FileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, TestLength, (short)1, RandomSeed);
                DFSTestUtil.CreateFile(fs, TestPath2, TestLength, (short)1, RandomSeed2);
                fsIn = cluster.GetFileSystem().Open(TestPath2);
                byte[] original = new byte[TestLength];
                IOUtils.ReadFully(fsIn, original, 0, TestLength);
                fsIn.Close();
                fsIn = null;
                try
                {
                    DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
                }
                catch (Exception e)
                {
                    NUnit.Framework.Assert.Fail("unexpected InterruptedException during " + "waitReplication: "
                                                + e);
                }
                catch (TimeoutException e)
                {
                    NUnit.Framework.Assert.Fail("unexpected TimeoutException during " + "waitReplication: "
                                                + e);
                }
                ExtendedBlock block    = DFSTestUtil.GetFirstBlock(fs, TestPath);
                FilePath      dataFile = cluster.GetBlockFile(0, block);
                cluster.Shutdown();
                cluster = null;
                RandomAccessFile raf = null;
                try
                {
                    raf = new RandomAccessFile(dataFile, "rw");
                    raf.SetLength(0);
                }
                finally
                {
                    if (raf != null)
                    {
                        raf.Close();
                    }
                }
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Format(false).Build();
                cluster.WaitActive();
                fs   = cluster.GetFileSystem();
                fsIn = fs.Open(TestPath);
                try
                {
                    byte[] buf = new byte[100];
                    fsIn.Seek(2000);
                    fsIn.ReadFully(buf, 0, buf.Length);
                    NUnit.Framework.Assert.Fail("shouldn't be able to read from corrupt 0-length " +
                                                "block file.");
                }
                catch (IOException e)
                {
                    DFSClient.Log.Error("caught exception ", e);
                }
                fsIn.Close();
                fsIn = null;
                // We should still be able to read the other file.
                // This is important because it indicates that we detected that the
                // previous block was corrupt, rather than blaming the problem on
                // communication.
                fsIn = fs.Open(TestPath2);
                byte[] buf_1 = new byte[original.Length];
                fsIn.ReadFully(buf_1, 0, buf_1.Length);
                TestBlockReaderLocal.AssertArrayRegionsEqual(original, 0, buf_1, 0, original.Length
                                                             );
                fsIn.Close();
                fsIn = null;
            }
            finally
            {
                if (fsIn != null)
                {
                    fsIn.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #23
        public virtual void TestMigrateOpenFileToArchival()
        {
            Log.Info("testMigrateOpenFileToArchival");
            Path fooDir = new Path("/foo");
            IDictionary <Path, BlockStoragePolicy> policyMap = Maps.NewHashMap();

            policyMap[fooDir] = Cold;
            TestStorageMover.NamespaceScheme nsScheme = new TestStorageMover.NamespaceScheme(
                Arrays.AsList(fooDir), null, BlockSize, null, policyMap);
            TestStorageMover.ClusterScheme clusterScheme = new TestStorageMover.ClusterScheme
                                                               (DefaultConf, NumDatanodes, Repl, GenStorageTypes(NumDatanodes), null);
            TestStorageMover.MigrationTest test = new TestStorageMover.MigrationTest(this, clusterScheme
                                                                                     , nsScheme);
            test.SetupCluster();
            // create an open file
            Banner("writing to file /foo/bar");
            Path barFile = new Path(fooDir, "bar");

            DFSTestUtil.CreateFile(test.dfs, barFile, BlockSize, (short)1, 0L);
            FSDataOutputStream @out = test.dfs.Append(barFile);

            @out.WriteBytes("hello, ");
            ((DFSOutputStream)@out.GetWrappedStream()).Hsync();
            try
            {
                Banner("start data migration");
                test.SetStoragePolicy();
                // set /foo to COLD
                test.Migrate();
                // make sure the under construction block has not been migrated
                LocatedBlocks lbs = test.dfs.GetClient().GetLocatedBlocks(barFile.ToString(), BlockSize
                                                                          );
                Log.Info("Locations: " + lbs);
                IList <LocatedBlock> blks = lbs.GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(1, blks.Count);
                NUnit.Framework.Assert.AreEqual(1, blks[0].GetLocations().Length);
                Banner("finish the migration, continue writing");
                // make sure the writing can continue
                @out.WriteBytes("world!");
                ((DFSOutputStream)@out.GetWrappedStream()).Hsync();
                IOUtils.Cleanup(Log, @out);
                lbs = test.dfs.GetClient().GetLocatedBlocks(barFile.ToString(), BlockSize);
                Log.Info("Locations: " + lbs);
                blks = lbs.GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(1, blks.Count);
                NUnit.Framework.Assert.AreEqual(1, blks[0].GetLocations().Length);
                Banner("finish writing, starting reading");
                // check the content of /foo/bar
                FSDataInputStream @in = test.dfs.Open(barFile);
                byte[]            buf = new byte[13];
                // read from offset BlockSize
                @in.ReadFully(BlockSize, buf, 0, buf.Length);
                IOUtils.Cleanup(Log, @in);
                NUnit.Framework.Assert.AreEqual("hello, world!", Sharpen.Runtime.GetStringForBytes
                                                    (buf));
            }
            finally
            {
                test.ShutdownCluster();
            }
        }
Example #24
        /* test read and getPos */
        /// <exception cref="System.Exception"/>
        private void CheckReadAndGetPos()
        {
            actual = new byte[FileSize];
            // test reads that do not cross checksum boundary
            stm.Seek(0);
            int offset;

            for (offset = 0; offset < BlockSize + BytesPerSum; offset += BytesPerSum)
            {
                NUnit.Framework.Assert.AreEqual(stm.GetPos(), offset);
                stm.ReadFully(actual, offset, BytesPerSum);
            }
            stm.ReadFully(actual, offset, FileSize - BlockSize - BytesPerSum);
            NUnit.Framework.Assert.AreEqual(stm.GetPos(), FileSize);
            CheckAndEraseData(actual, 0, expected, "Read Sanity Test");
            // test reads that cross checksum boundary
            stm.Seek(0L);
            NUnit.Framework.Assert.AreEqual(stm.GetPos(), 0L);
            stm.ReadFully(actual, 0, HalfChunkSize);
            NUnit.Framework.Assert.AreEqual(stm.GetPos(), HalfChunkSize);
            stm.ReadFully(actual, HalfChunkSize, BlockSize - HalfChunkSize);
            NUnit.Framework.Assert.AreEqual(stm.GetPos(), BlockSize);
            stm.ReadFully(actual, BlockSize, BytesPerSum + HalfChunkSize);
            NUnit.Framework.Assert.AreEqual(stm.GetPos(), BlockSize + BytesPerSum + HalfChunkSize
                                            );
            stm.ReadFully(actual, 2 * BlockSize - HalfChunkSize, FileSize - (2 * BlockSize -
                                                                             HalfChunkSize));
            NUnit.Framework.Assert.AreEqual(stm.GetPos(), FileSize);
            CheckAndEraseData(actual, 0, expected, "Read Sanity Test");
            // test read that cross block boundary
            stm.Seek(0L);
            stm.ReadFully(actual, 0, BytesPerSum + HalfChunkSize);
            NUnit.Framework.Assert.AreEqual(stm.GetPos(), BytesPerSum + HalfChunkSize);
            stm.ReadFully(actual, BytesPerSum + HalfChunkSize, BytesPerSum);
            NUnit.Framework.Assert.AreEqual(stm.GetPos(), BlockSize + HalfChunkSize);
            stm.ReadFully(actual, BlockSize + HalfChunkSize, FileSize - BlockSize - HalfChunkSize
                          );
            NUnit.Framework.Assert.AreEqual(stm.GetPos(), FileSize);
            CheckAndEraseData(actual, 0, expected, "Read Sanity Test");
        }
Example #25
        public virtual void TestMissingBlocksAlert()
        {
            MiniDFSCluster cluster = null;

            try
            {
                Configuration conf = new HdfsConfiguration();
                //minimize test delay
                conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, 0);
                conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
                int fileLen = 10 * 1024;
                conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, fileLen / 2);
                //start a cluster with single datanode
                cluster = new MiniDFSCluster.Builder(conf).Build();
                cluster.WaitActive();
                BlockManager          bm  = cluster.GetNamesystem().GetBlockManager();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                // create a normal file
                DFSTestUtil.CreateFile(dfs, new Path("/testMissingBlocksAlert/file1"), fileLen, (
                                           short)3, 0);
                Path corruptFile = new Path("/testMissingBlocks/corruptFile");
                DFSTestUtil.CreateFile(dfs, corruptFile, fileLen, (short)3, 0);
                // Corrupt the block
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(dfs, corruptFile);
                NUnit.Framework.Assert.IsTrue(cluster.CorruptReplica(0, block));
                // read the file so that the corrupt block is reported to NN
                FSDataInputStream @in = dfs.Open(corruptFile);
                try
                {
                    @in.ReadFully(new byte[fileLen]);
                }
                catch (ChecksumException)
                {
                    // checksum error is expected.
                }
                @in.Close();
                Log.Info("Waiting for missing blocks count to increase...");
                while (dfs.GetMissingBlocksCount() <= 0)
                {
                    Sharpen.Thread.Sleep(100);
                }
                NUnit.Framework.Assert.IsTrue(dfs.GetMissingBlocksCount() == 1);
                NUnit.Framework.Assert.AreEqual(4, dfs.GetUnderReplicatedBlocksCount());
                NUnit.Framework.Assert.AreEqual(3, bm.GetUnderReplicatedNotMissingBlocks());
                MBeanServer mbs        = ManagementFactory.GetPlatformMBeanServer();
                ObjectName  mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo"
                                                        );
                NUnit.Framework.Assert.AreEqual(1, (long)mbs.GetAttribute(mxbeanName, "NumberOfMissingBlocks"
                                                                          ));
                // now do the reverse: remove the file and expect the number of
                // missing blocks to go to zero
                dfs.Delete(corruptFile, true);
                Log.Info("Waiting for missing blocks count to be zero...");
                while (dfs.GetMissingBlocksCount() > 0)
                {
                    Sharpen.Thread.Sleep(100);
                }
                NUnit.Framework.Assert.AreEqual(2, dfs.GetUnderReplicatedBlocksCount());
                NUnit.Framework.Assert.AreEqual(2, bm.GetUnderReplicatedNotMissingBlocks());
                NUnit.Framework.Assert.AreEqual(0, (long)mbs.GetAttribute(mxbeanName, "NumberOfMissingBlocks"
                                                                          ));
                Path replOneFile = new Path("/testMissingBlocks/replOneFile");
                DFSTestUtil.CreateFile(dfs, replOneFile, fileLen, (short)1, 0);
                ExtendedBlock replOneBlock = DFSTestUtil.GetFirstBlock(dfs, replOneFile);
                NUnit.Framework.Assert.IsTrue(cluster.CorruptReplica(0, replOneBlock));
                // read the file so that the corrupt block is reported to NN
                @in = dfs.Open(replOneFile);
                try
                {
                    @in.ReadFully(new byte[fileLen]);
                }
                catch (ChecksumException)
                {
                    // checksum error is expected.
                }
                @in.Close();
                NUnit.Framework.Assert.AreEqual(1, dfs.GetMissingReplOneBlocksCount());
                NUnit.Framework.Assert.AreEqual(1, (long)mbs.GetAttribute(mxbeanName, "NumberOfMissingBlocksWithReplicationFactorOne"
                                                                          ));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #26
        /// <exception cref="System.IO.IOException"/>
        private void PReadFile(FileSystem fileSys, Path name)
        {
            FSDataInputStream stm = fileSys.Open(name);

            byte[] expected = new byte[12 * blockSize];
            if (simulatedStorage)
            {
                for (int i = 0; i < expected.Length; i++)
                {
                    expected[i] = SimulatedFSDataset.DefaultDatabyte;
                }
            }
            else
            {
                Random rand = new Random(seed);
                rand.NextBytes(expected);
            }
            // do a sanity check. Read first 4K bytes
            byte[] actual = new byte[4096];
            stm.ReadFully(actual);
            CheckAndEraseData(actual, 0, expected, "Read Sanity Test");
            // now do a pread for the first 8K bytes
            actual = new byte[8192];
            DoPread(stm, 0L, actual, 0, 8192);
            CheckAndEraseData(actual, 0, expected, "Pread Test 1");
            // Now check to see if the normal read returns 4K-8K byte range
            actual = new byte[4096];
            stm.ReadFully(actual);
            CheckAndEraseData(actual, 4096, expected, "Pread Test 2");
            // Now see if we can cross a single block boundary successfully
            // read 4K bytes from blockSize - 2K offset
            stm.ReadFully(blockSize - 2048, actual, 0, 4096);
            CheckAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 3");
            // now see if we can cross two block boundaries successfully
            // read blockSize + 4K bytes from blockSize - 2K offset
            actual = new byte[blockSize + 4096];
            stm.ReadFully(blockSize - 2048, actual);
            CheckAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 4");
            // now see if we can cross two block boundaries that are not cached
            // read blockSize + 4K bytes from 10*blockSize - 2K offset
            actual = new byte[blockSize + 4096];
            stm.ReadFully(10 * blockSize - 2048, actual);
            CheckAndEraseData(actual, (10 * blockSize - 2048), expected, "Pread Test 5");
            // now check that even after all these preads, we can still read
            // bytes 8K-12K
            actual = new byte[4096];
            stm.ReadFully(actual);
            CheckAndEraseData(actual, 8192, expected, "Pread Test 6");
            // done
            stm.Close();
            // check block location caching
            stm = fileSys.Open(name);
            stm.ReadFully(1, actual, 0, 4096);
            stm.ReadFully(4 * blockSize, actual, 0, 4096);
            stm.ReadFully(7 * blockSize, actual, 0, 4096);
            actual = new byte[3 * 4096];
            stm.ReadFully(0 * blockSize, actual, 0, 3 * 4096);
            CheckAndEraseData(actual, 0, expected, "Pread Test 7");
            actual = new byte[8 * 4096];
            stm.ReadFully(3 * blockSize, actual, 0, 8 * 4096);
            CheckAndEraseData(actual, 3 * blockSize, expected, "Pread Test 8");
            // read the tail
            stm.ReadFully(11 * blockSize + blockSize / 2, actual, 0, blockSize / 2);
            IOException res = null;

            try
            {
                // read beyond the end of the file
                stm.ReadFully(11 * blockSize + blockSize / 2, actual, 0, blockSize);
            }
            catch (IOException e)
            {
                // should throw an exception
                res = e;
            }
            NUnit.Framework.Assert.IsTrue("Error reading beyond file boundary.", res != null);
            stm.Close();
        }
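DoPread is called above but defined elsewhere in the original test. A minimal stand-in consistent with the call sites would loop over positional Read calls until the requested count arrives; this is an assumption about its shape, not the original implementation:

        // Hedged stand-in for DoPread: positional reads never move the
        // stream's file pointer, unlike the sequential ReadFully calls above.
        private static void DoPread(FSDataInputStream stm, long position,
                                    byte[] buffer, int offset, int length)
        {
            int nread = 0;
            while (nread < length)
            {
                int nbytes = stm.Read(position + nread, buffer, offset + nread,
                                      length - nread);
                NUnit.Framework.Assert.IsTrue("premature EOF from positional read",
                                              nbytes >= 0);
                nread += nbytes;
            }
        }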