Example #1
        public virtual void TestDeletionWithZeroSizeBlock2()
        {
            Path foo    = new Path("/foo");
            Path subDir = new Path(foo, "sub");
            Path bar    = new Path(subDir, "bar");

            // Create a one-block file, then open it for append so a new block can be allocated.
            DFSTestUtil.CreateFile(hdfs, bar, Blocksize, Replication, 0L);
            hdfs.Append(bar);
            INodeFile barNode = fsdir.GetINode4Write(bar.ToString()).AsFile();

            BlockInfoContiguous[] blks = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(1, blks.Length);
            ExtendedBlock previous = new ExtendedBlock(fsn.GetBlockPoolId(), blks[0]);

            // Allocate a new (still zero-size) block directly through the NameNode RPC.
            cluster.GetNameNodeRpc().AddBlock(bar.ToString(), hdfs.GetClient().GetClientName(),
                                              previous, null, barNode.GetId(), null);
            SnapshotTestHelper.CreateSnapshot(hdfs, foo, "s1");
            barNode = fsdir.GetINode4Write(bar.ToString()).AsFile();
            blks    = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(2, blks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize, blks[0].GetNumBytes());
            NUnit.Framework.Assert.AreEqual(0, blks[1].GetNumBytes());
            // Delete the directory; the snapshot copy of the file should drop the
            // trailing zero-size block and keep only the first, full block.
            hdfs.Delete(subDir, true);
            Path sbar = SnapshotTestHelper.GetSnapshotPath(foo, "s1", "sub/bar");

            barNode = fsdir.GetINode(sbar.ToString()).AsFile();
            blks    = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(1, blks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize, blks[0].GetNumBytes());
        }
Example #2
        /// <exception cref="System.Exception"/>
        internal static INodeFile AssertBlockCollection(string path, int numBlocks,
                                                        FSDirectory dir, BlockManager blkManager)
        {
            INodeFile file = INodeFile.ValueOf(dir.GetINode(path), path);

            NUnit.Framework.Assert.AreEqual(numBlocks, file.GetBlocks().Length);
            foreach (BlockInfoContiguous b in file.GetBlocks())
            {
                AssertBlockCollection(blkManager, file, b);
            }
            return file;
        }
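The per-block overload called in the loop above is not part of this listing. The following is a minimal sketch of what it plausibly checks, modeled on the matching Hadoop test helper; the exact assertions are an assumption, not code from this page.

        // Hypothetical sketch of the per-block AssertBlockCollection overload
        // used above; the assertions are assumed, not taken from this listing.
        internal static void AssertBlockCollection(BlockManager blkManager, INodeFile file,
                                                   BlockInfoContiguous b)
        {
            // The blocksMap should hold this exact block instance...
            NUnit.Framework.Assert.AreSame(b, blkManager.GetStoredBlock(b));
            // ...and both the manager and the block should point back to the owning file.
            NUnit.Framework.Assert.AreSame(file, blkManager.GetBlockCollection(b));
            NUnit.Framework.Assert.AreSame(file, b.GetBlockCollection());
        }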
Example #3
        /// <exception cref="System.Exception"/>
        internal virtual void AssertAllNull(INodeFile inode, Path path, string[] snapshots)
        {
            NUnit.Framework.Assert.IsNull(inode.GetBlocks());
            AssertINodeNull(path.ToString());
            AssertINodeNullInSnapshots(path, snapshots);
        }
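AssertINodeNull and AssertINodeNullInSnapshots are not shown on this page. A plausible sketch of the two helpers, assuming they simply resolve the path (and each snapshot copy of it) and assert that no INode exists; the bodies are an assumption:

        // Hypothetical helpers matching the calls in AssertAllNull above.
        internal virtual void AssertINodeNull(string path)
        {
            NUnit.Framework.Assert.IsNull(fsdir.GetINode(path));
        }

        internal virtual void AssertINodeNullInSnapshots(Path path, string[] snapshots)
        {
            foreach (string snapshot in snapshots)
            {
                // Resolve the path as it would appear under each snapshot of the parent.
                Path snapshotPath = SnapshotTestHelper.GetSnapshotPath(
                    path.GetParent(), snapshot, path.GetName());
                AssertINodeNull(snapshotPath.ToString());
            }
        }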
Example #4
        public virtual void TestDeletionOfLaterBlocksWithZeroSizeFirstBlock()
        {
            Path foo = new Path("/foo");
            Path bar = new Path(foo, "bar");

            byte[] testData = Sharpen.Runtime.GetBytesForString("foo bar baz");
            // Create a zero-length file.
            DFSTestUtil.CreateFile(hdfs, bar, 0, Replication, 0L);
            NUnit.Framework.Assert.AreEqual(0, fsdir.GetINode4Write(bar.ToString())
                                            .AsFile().GetBlocks().Length);
            // Create a snapshot that includes that file.
            SnapshotTestHelper.CreateSnapshot(hdfs, foo, "s0");
            // Extend that file.
            FSDataOutputStream @out = hdfs.Append(bar);

            @out.Write(testData);
            @out.Close();
            INodeFile barNode = fsdir.GetINode4Write(bar.ToString()).AsFile();

            BlockInfoContiguous[] blks = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(1, blks.Length);
            NUnit.Framework.Assert.AreEqual(testData.Length, blks[0].GetNumBytes());
            // Delete the file.
            hdfs.Delete(bar, true);
            // Now make sure that the NN can still save an fsimage successfully.
            cluster.GetNameNode().GetRpcServer().SetSafeMode(
                HdfsConstants.SafeModeAction.SafemodeEnter, false);
            cluster.GetNameNode().GetRpcServer().SaveNamespace();
        }
Example #5
        public virtual void TestBlockReceived()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024);
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(DatanodeCount).Build();
                cluster.WaitActive();
                DistributedFileSystem hdfs       = cluster.GetFileSystem();
                FSNamesystem          fsn        = cluster.GetNamesystem();
                BlockManager          blkManager = fsn.GetBlockManager();
                string file       = "/tmp.txt";
                Path   filePath   = new Path(file);
                short  replFactor = 1;
                DFSTestUtil.CreateFile(hdfs, filePath, 1024L, replFactor, 0);
                // temporarily stop the heartbeat
                AList<DataNode> datanodes = cluster.GetDataNodes();
                for (int i = 0; i < DatanodeCount; i++)
                {
                    DataNodeTestUtils.SetHeartbeatsDisabledForTests(datanodes[i], true);
                }
                hdfs.SetReplication(filePath, (short)DatanodeCount);
                BlockManagerTestUtil.ComputeAllPendingWork(blkManager);
                NUnit.Framework.Assert.AreEqual(1, blkManager.pendingReplications.Size());
                INodeFile fileNode = fsn.GetFSDirectory().GetINode4Write(file).AsFile();
                Block[]   blocks   = fileNode.GetBlocks();
                NUnit.Framework.Assert.AreEqual(DatanodeCount - 1,
                                                blkManager.pendingReplications.GetNumReplicas(blocks[0]));
                LocatedBlock locatedBlock = hdfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                DatanodeInfo existingDn   = (locatedBlock.GetLocations())[0];
                int          reportDnNum  = 0;
                string       poolId       = cluster.GetNamesystem().GetBlockPoolId();
                // Let two datanodes (other than the one that already has the data)
                // report to the NN.
                for (int i_1 = 0; i_1 < DatanodeCount && reportDnNum < 2; i_1++)
                {
                    if (!datanodes[i_1].GetDatanodeId().Equals(existingDn))
                    {
                        DatanodeRegistration dnR = datanodes[i_1].GetDNRegistrationForBP(poolId);
                        StorageReceivedDeletedBlocks[] report = new StorageReceivedDeletedBlocks[] {
                            new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
                                new ReceivedDeletedBlockInfo[] {
                                    new ReceivedDeletedBlockInfo(blocks[0],
                                        ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock, string.Empty)
                                })
                        };
                        cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dnR, poolId, report);
                        reportDnNum++;
                    }
                }
                NUnit.Framework.Assert.AreEqual(DatanodeCount - 3,
                                                blkManager.pendingReplications.GetNumReplicas(blocks[0]));
                // Let the same datanodes report again; a duplicate report must not
                // change the pending count. (The counter is reset here so the loop
                // actually executes; as originally written it would be skipped.)
                reportDnNum = 0;
                for (int i_2 = 0; i_2 < DatanodeCount && reportDnNum < 2; i_2++)
                {
                    if (!datanodes[i_2].GetDatanodeId().Equals(existingDn))
                    {
                        DatanodeRegistration dnR = datanodes[i_2].GetDNRegistrationForBP(poolId);
                        StorageReceivedDeletedBlocks[] report = new StorageReceivedDeletedBlocks[] {
                            new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
                                new ReceivedDeletedBlockInfo[] {
                                    new ReceivedDeletedBlockInfo(blocks[0],
                                        ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock, string.Empty)
                                })
                        };
                        cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dnR, poolId, report);
                        reportDnNum++;
                    }
                }
                NUnit.Framework.Assert.AreEqual(DatanodeCount - 3,
                                                blkManager.pendingReplications.GetNumReplicas(blocks[0]));
                // Re-enable heartbeats for all datanodes and trigger them so the
                // pending replications can complete.
                for (int i_3 = 0; i_3 < DatanodeCount; i_3++)
                {
                    DataNodeTestUtils.SetHeartbeatsDisabledForTests(datanodes[i_3], false);
                    DataNodeTestUtils.TriggerHeartbeat(datanodes[i_3]);
                }
                Sharpen.Thread.Sleep(5000);
                NUnit.Framework.Assert.AreEqual(0, blkManager.pendingReplications.Size());
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
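A note on the arithmetic behind the assertions above: raising replication from 1 to DatanodeCount schedules DatanodeCount - 1 pending replicas, and each distinct "block received" report removes exactly one. A minimal hypothetical restatement (the concrete DatanodeCount value is an assumption for illustration):

        // Hypothetical restatement of the pending-replication bookkeeping
        // checked above; DatanodeCount = 4 is an assumed value for illustration.
        const int DatanodeCount = 4;
        int pending = DatanodeCount - 1;  // SetReplication raises a 1-replica file to DatanodeCount
        pending -= 2;                     // two distinct datanodes report ReceivedBlock
        // pending is now DatanodeCount - 3; a duplicate report from the same
        // datanode is a no-op, so the count stays there until heartbeats resume
        // and the remaining transfers complete (count then drops to 0).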
Example #6
        /// <summary>Test deleting a file with snapshots.</summary>
        /// <remarks>
        /// Test deleting a file with snapshots. The blocksMap needs to be checked
        /// to make sure the corresponding records are updated correctly.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestDeletionWithSnapshots()
        {
            Path file0 = new Path(sub1, "file0");
            Path file1 = new Path(sub1, "file1");
            Path sub2  = new Path(sub1, "sub2");
            Path file2 = new Path(sub2, "file2");
            Path file3 = new Path(sub1, "file3");
            Path file4 = new Path(sub1, "file4");
            Path file5 = new Path(sub1, "file5");

            // Create file under sub1
            DFSTestUtil.CreateFile(hdfs, file0, 4 * Blocksize, Replication, seed);
            DFSTestUtil.CreateFile(hdfs, file1, 2 * Blocksize, Replication, seed);
            DFSTestUtil.CreateFile(hdfs, file2, 3 * Blocksize, Replication, seed);
            {
                // Normal deletion
                INodeFile             f2     = AssertBlockCollection(file2.ToString(), 3, fsdir, blockmanager);
                BlockInfoContiguous[] blocks = f2.GetBlocks();
                hdfs.Delete(sub2, true);
                // The INode should have been removed from the blocksMap
                foreach (BlockInfoContiguous b in blocks)
                {
                    NUnit.Framework.Assert.IsNull(blockmanager.GetBlockCollection(b));
                }
            }
            // Create snapshots for sub1
            string[] snapshots = new string[] { "s0", "s1", "s2" };
            DFSTestUtil.CreateFile(hdfs, file3, 5 * Blocksize, Replication, seed);
            SnapshotTestHelper.CreateSnapshot(hdfs, sub1, snapshots[0]);
            DFSTestUtil.CreateFile(hdfs, file4, 1 * Blocksize, Replication, seed);
            SnapshotTestHelper.CreateSnapshot(hdfs, sub1, snapshots[1]);
            DFSTestUtil.CreateFile(hdfs, file5, 7 * Blocksize, Replication, seed);
            SnapshotTestHelper.CreateSnapshot(hdfs, sub1, snapshots[2]);
            {
                // Change the replication factor so that the inode is modified and
                // captured in the snapshot diff.
                INodeFile f1 = AssertBlockCollection(file1.ToString(), 2, fsdir, blockmanager);
                NUnit.Framework.Assert.AreSame(typeof(INodeFile), f1.GetType());
                hdfs.SetReplication(file1, (short)2);
                f1 = AssertBlockCollection(file1.ToString(), 2, fsdir, blockmanager);
                NUnit.Framework.Assert.IsTrue(f1.IsWithSnapshot());
                NUnit.Framework.Assert.IsFalse(f1.IsUnderConstruction());
            }
            // Check the block information for file0
            INodeFile f0 = AssertBlockCollection(file0.ToString(), 4, fsdir, blockmanager);

            BlockInfoContiguous[] blocks0 = f0.GetBlocks();
            // Also check the block information for snapshot of file0
            Path snapshotFile0 = SnapshotTestHelper.GetSnapshotPath(sub1, "s0", file0.GetName());

            AssertBlockCollection(snapshotFile0.ToString(), 4, fsdir, blockmanager);
            // Delete file0
            hdfs.Delete(file0, true);
            // Make sure the blocks of file0 are still in the blocksMap.
            foreach (BlockInfoContiguous b_1 in blocks0)
            {
                NUnit.Framework.Assert.IsNotNull(blockmanager.GetBlockCollection(b_1));
            }
            AssertBlockCollection(snapshotFile0.ToString(), 4, fsdir, blockmanager);
            // Compare the INode in the blocksMap with INodes for snapshots
            string s1f0 = SnapshotTestHelper.GetSnapshotPath(sub1, "s1", file0.GetName()).ToString();

            AssertBlockCollection(s1f0, 4, fsdir, blockmanager);
            // Delete snapshot s1
            hdfs.DeleteSnapshot(sub1, "s1");
            // After deleting snapshot s1, the blocks of file0 should still be in the blocksMap.
            foreach (BlockInfoContiguous b_2 in blocks0)
            {
                NUnit.Framework.Assert.IsNotNull(blockmanager.GetBlockCollection(b_2));
            }
            AssertBlockCollection(snapshotFile0.ToString(), 4, fsdir, blockmanager);
            try
            {
                INodeFile.ValueOf(fsdir.GetINode(s1f0), s1f0);
                NUnit.Framework.Assert.Fail(
                    "Expect FileNotFoundException when identifying the INode in a deleted Snapshot");
            }
            catch (IOException e)
            {
                GenericTestUtils.AssertExceptionContains("File does not exist: " + s1f0, e);
            }
        }
        /// <summary>Apply a CacheDirective to a file.</summary>
        /// <param name="directive">The CacheDirective to apply.</param>
        /// <param name="file">The file.</param>
        private void RescanFile(CacheDirective directive, INodeFile file)
        {
            BlockInfoContiguous[] blockInfos = file.GetBlocks();
            // Increment the "needed" statistics
            directive.AddFilesNeeded(1);
            // Under-construction blocks are not cached, so the last UC block is
            // excluded from the total here.
            long neededTotal = file.ComputeFileSizeNotIncludingLastUcBlock() * directive.GetReplication();

            directive.AddBytesNeeded(neededTotal);
            // The pool's bytesNeeded is incremented as we scan. If the demand
            // thus far plus the demand of this file would exceed the pool's limit,
            // do not cache this file.
            CachePool pool = directive.GetPool();

            if (pool.GetBytesNeeded() > pool.GetLimit())
            {
                Log.Debug("Directive {}: not scanning file {} because " + "bytesNeeded for pool {} is {}, but the pool's limit is {}"
                          , directive.GetId(), file.GetFullPathName(), pool.GetPoolName(), pool.GetBytesNeeded
                              (), pool.GetLimit());
                return;
            }
            long cachedTotal = 0;

            foreach (BlockInfoContiguous blockInfo in blockInfos)
            {
                if (!blockInfo.GetBlockUCState().Equals(HdfsServerConstants.BlockUCState.Complete))
                {
                    // We don't try to cache blocks that are under construction.
                    Log.Trace("Directive {}: can't cache block {} because it is in state " + "{}, not COMPLETE."
                              , directive.GetId(), blockInfo, blockInfo.GetBlockUCState());
                    continue;
                }
                Block block = new Block(blockInfo.GetBlockId());
                CachedBlock ncblock = new CachedBlock(block.GetBlockId(), directive.GetReplication(), mark);
                CachedBlock ocblock = cachedBlocks.Get(ncblock);
                if (ocblock == null)
                {
                    cachedBlocks.Put(ncblock);
                    ocblock = ncblock;
                }
                else
                {
                    // Update bytesUsed using the current replication levels.
                    // Assumption: all replicas of a block have the same length on
                    // each datanode, which holds because only COMPLETE blocks are cached.
                    // Note that if two directives cache the same block(s), the block's
                    // bytes are added to both directives' bytesCached.
                    IList<DatanodeDescriptor> cachedOn = ocblock.GetDatanodes(
                        DatanodeDescriptor.CachedBlocksList.Type.Cached);
                    long cachedByBlock = Math.Min(cachedOn.Count, directive.GetReplication())
                                         * blockInfo.GetNumBytes();
                    cachedTotal += cachedByBlock;
                    if ((mark != ocblock.GetMark()) ||
                        (ocblock.GetReplication() < directive.GetReplication()))
                    {
                        //
                        // Overwrite the block's replication and mark in two cases:
                        //
                        // 1. If the mark on the CachedBlock is different from the mark for
                        // this scan, that means the block hasn't been updated during this
                        // scan, and we should overwrite whatever is there, since it is no
                        // longer valid.
                        //
                        // 2. If the replication in the CachedBlock is less than what the
                        // directive asks for, we want to increase the block's replication
                        // field to what the directive asks for.
                        //
                        ocblock.SetReplicationAndMark(directive.GetReplication(), mark);
                    }
                }
                Log.Trace("Directive {}: setting replication for block {} to {}", directive.GetId
                              (), blockInfo, ocblock.GetReplication());
            }
            // Increment the "cached" statistics
            directive.AddBytesCached(cachedTotal);
            if (cachedTotal == neededTotal)
            {
                directive.AddFilesCached(1);
            }
            Log.Debug("Directive {}: caching {}: {}/{} bytes", directive.GetId(), file.GetFullPathName
                          (), cachedTotal, neededTotal);
        }
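As a quick illustration of the bookkeeping in RescanFile (all numbers here are made up, not from the listing): neededTotal is the file size times the directive's replication, and each COMPLETE block contributes min(number of datanodes caching it, replication) * blockBytes to cachedTotal.

        // Hypothetical numbers illustrating the bytesNeeded/bytesCached arithmetic.
        long blockBytes = 128L * 1024 * 1024;   // two COMPLETE blocks of 128 MB each
        long fileSize = 2 * blockBytes;
        int replication = 3;                    // stands in for directive.GetReplication()

        long neededTotal = fileSize * replication;                 // 805306368 bytes

        // Block 1 is cached on 3 datanodes, block 2 on only 1:
        long cachedTotal = Math.Min(3, replication) * blockBytes   // fully cached block
                         + Math.Min(1, replication) * blockBytes;  // partially cached block

        // cachedTotal (536870912) < neededTotal (805306368), so the directive
        // would not yet count this file in its filesCached statistic.
        System.Console.WriteLine($"{cachedTotal}/{neededTotal} bytes cached");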