Example #1
        /// <summary>Convert LocatedBlocks to a Json string.</summary>
        /// <exception cref="System.IO.IOException"/>
        public static string ToJsonString(LocatedBlocks locatedblocks)
        {
            if (locatedblocks == null)
            {
                return null;
            }
            IDictionary<string, object> m = new SortedDictionary<string, object>();

            m["fileLength"]          = locatedblocks.GetFileLength();
            m["isUnderConstruction"] = locatedblocks.IsUnderConstruction();
            m["locatedBlocks"]       = ToJsonArray(locatedblocks.GetLocatedBlocks());
            m["lastLocatedBlock"]    = ToJsonMap(locatedblocks.GetLastLocatedBlock());
            m["isLastBlockComplete"] = locatedblocks.IsLastBlockComplete();
            return ToJsonString(typeof(LocatedBlocks), m);
        }
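
For context, here is a minimal, self-contained sketch of the same build-a-sorted-map-then-serialize pattern, assuming System.Text.Json is available. JsonMapSketch is a hypothetical stand-in, and wrapping the map under the class name is one plausible reading of ToJsonString(typeof(LocatedBlocks), m), not the actual JsonUtil helper.

using System;
using System.Collections.Generic;
using System.Text.Json;

static class JsonMapSketch
{
    // Serializes a sorted key/value map wrapped under a class-name key,
    // one plausible reading of ToJsonString(typeof(LocatedBlocks), m) above.
    public static string ToJsonString(IDictionary<string, object> m, string className)
    {
        if (m == null)
        {
            return null;
        }
        var wrapped = new SortedDictionary<string, object> { [className] = m };
        return JsonSerializer.Serialize(wrapped);
    }

    public static void Main()
    {
        var m = new SortedDictionary<string, object>
        {
            ["fileLength"] = 1024L,
            ["isLastBlockComplete"] = true,
            ["isUnderConstruction"] = false
        };
        // Prints: {"LocatedBlocks":{"fileLength":1024,"isLastBlockComplete":true,"isUnderConstruction":false}}
        Console.WriteLine(ToJsonString(m, "LocatedBlocks"));
    }
}
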
Example #2
            /// <returns>true if it is necessary to run another round of migration</returns>
            private bool ProcessFile(string fullPath, HdfsLocatedFileStatus status)
            {
                byte policyId = status.GetStoragePolicy();

                // currently we ignore files with unspecified storage policy
                if (policyId == BlockStoragePolicySuite.IdUnspecified)
                {
                    return false;
                }
                BlockStoragePolicy policy = this._enclosing.blockStoragePolicies[policyId];

                if (policy == null)
                {
                    Org.Apache.Hadoop.Hdfs.Server.Mover.Mover.Log.Warn(
                        "Failed to get the storage policy of file " + fullPath);
                    return false;
                }
                IList<StorageType> types = policy.ChooseStorageTypes(status.GetReplication());
                LocatedBlocks locatedBlocks = status.GetBlockLocations();
                bool hasRemaining = false;
                bool lastBlkComplete = locatedBlocks.IsLastBlockComplete();
                IList<LocatedBlock> lbs = locatedBlocks.GetLocatedBlocks();

                for (int i = 0; i < lbs.Count; i++)
                {
                    if (i == lbs.Count - 1 && !lastBlkComplete)
                    {
                        // last block is incomplete, skip it
                        continue;
                    }
                    LocatedBlock lb = lbs[i];
                    Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types, lb.GetStorageTypes());
                    if (!diff.RemoveOverlap(true))
                    {
                        if (this.ScheduleMoves4Block(diff, lb))
                        {
                            hasRemaining |= (diff.existing.Count > 1 && diff.expected.Count > 1);
                        }
                    }
                }
                return hasRemaining;
            }
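
Whether a move gets scheduled hinges on Mover.StorageTypeDiff.RemoveOverlap. Below is a minimal, self-contained sketch of that overlap-removal idea; the StorageType enum and the RemoveOverlap helper are illustrative stand-ins, not the actual Hadoop types.

using System;
using System.Collections.Generic;

// Illustrative stand-in for Hadoop's StorageType; only the values matter here.
enum StorageType { Disk, Ssd, Archive }

static class StorageTypeDiffSketch
{
    // Cancels each storage type that appears in both lists, one occurrence at
    // a time, and returns true when nothing is left to move. Example #2 only
    // schedules moves for a block when this returns false.
    public static bool RemoveOverlap(IList<StorageType> expected, IList<StorageType> existing)
    {
        for (int i = existing.Count - 1; i >= 0; i--)
        {
            if (expected.Remove(existing[i]))
            {
                existing.RemoveAt(i);
            }
        }
        return expected.Count == 0 || existing.Count == 0;
    }

    public static void Main()
    {
        // The policy wants two ARCHIVE replicas; the block sits on DISK + ARCHIVE.
        var expected = new List<StorageType> { StorageType.Archive, StorageType.Archive };
        var existing = new List<StorageType> { StorageType.Disk, StorageType.Archive };
        Console.WriteLine(RemoveOverlap(expected, existing)); // False: one replica must still move
        Console.WriteLine(string.Join(",", expected));        // Archive
        Console.WriteLine(string.Join(",", existing));        // Disk
    }
}
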
Example #3
        public virtual void TestGetBlockLocations()
        {
            Path root = new Path("/");
            Path file = new Path("/file");

            DFSTestUtil.CreateFile(hdfs, file, Blocksize, Replication, seed);
            // take a snapshot on root
            SnapshotTestHelper.CreateSnapshot(hdfs, root, "s1");
            Path fileInSnapshot = SnapshotTestHelper.GetSnapshotPath(root, "s1", file.GetName());
            FileStatus status = hdfs.GetFileStatus(fileInSnapshot);

            // make sure we record the size for the file
            NUnit.Framework.Assert.AreEqual(Blocksize, status.GetLen());
            // append data to file
            DFSTestUtil.AppendFile(hdfs, file, Blocksize - 1);
            status = hdfs.GetFileStatus(fileInSnapshot);
            // the size of snapshot file should still be BLOCKSIZE
            NUnit.Framework.Assert.AreEqual(Blocksize, status.GetLen());
            // the size of the file should be (2 * BLOCKSIZE - 1)
            status = hdfs.GetFileStatus(file);
            NUnit.Framework.Assert.AreEqual(Blocksize * 2 - 1, status.GetLen());
            // call DFSClient#callGetBlockLocations for the file in snapshot
            LocatedBlocks blocks = DFSClientAdapter.CallGetBlockLocations(
                cluster.GetNameNodeRpc(), fileInSnapshot.ToString(), 0, long.MaxValue);
            IList<LocatedBlock> blockList = blocks.GetLocatedBlocks();

            // should be only one block
            NUnit.Framework.Assert.AreEqual(Blocksize, blocks.GetFileLength());
            NUnit.Framework.Assert.AreEqual(1, blockList.Count);
            // check the last block
            LocatedBlock lastBlock = blocks.GetLastLocatedBlock();

            NUnit.Framework.Assert.AreEqual(0, lastBlock.GetStartOffset());
            NUnit.Framework.Assert.AreEqual(Blocksize, lastBlock.GetBlockSize());
            // take another snapshot
            SnapshotTestHelper.CreateSnapshot(hdfs, root, "s2");
            Path fileInSnapshot2 = SnapshotTestHelper.GetSnapshotPath(root, "s2", file.GetName());
            // append data to file without closing
            HdfsDataOutputStream @out = AppendFileWithoutClosing(file, Blocksize);

            @out.Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
            status = hdfs.GetFileStatus(fileInSnapshot2);
            // the size of snapshot file should be BLOCKSIZE*2-1
            NUnit.Framework.Assert.AreEqual(Blocksize * 2 - 1, status.GetLen());
            // the size of the file should be (3 * BLOCKSIZE - 1)
            status = hdfs.GetFileStatus(file);
            NUnit.Framework.Assert.AreEqual(Blocksize * 3 - 1, status.GetLen());
            blocks = DFSClientAdapter.CallGetBlockLocations(
                cluster.GetNameNodeRpc(), fileInSnapshot2.ToString(), 0, long.MaxValue);
            NUnit.Framework.Assert.IsFalse(blocks.IsUnderConstruction());
            NUnit.Framework.Assert.IsTrue(blocks.IsLastBlockComplete());
            blockList = blocks.GetLocatedBlocks();
            // should be 2 blocks
            NUnit.Framework.Assert.AreEqual(Blocksize * 2 - 1, blocks.GetFileLength());
            NUnit.Framework.Assert.AreEqual(2, blockList.Count);
            // check the last block
            lastBlock = blocks.GetLastLocatedBlock();
            NUnit.Framework.Assert.AreEqual(Blocksize, lastBlock.GetStartOffset());
            NUnit.Framework.Assert.AreEqual(Blocksize, lastBlock.GetBlockSize());
            blocks = DFSClientAdapter.CallGetBlockLocations(
                cluster.GetNameNodeRpc(), fileInSnapshot2.ToString(), Blocksize, 0);
            blockList = blocks.GetLocatedBlocks();
            NUnit.Framework.Assert.AreEqual(1, blockList.Count);
            // check blocks for file being written
            blocks = DFSClientAdapter.CallGetBlockLocations(
                cluster.GetNameNodeRpc(), file.ToString(), 0, long.MaxValue);
            blockList = blocks.GetLocatedBlocks();
            NUnit.Framework.Assert.AreEqual(3, blockList.Count);
            NUnit.Framework.Assert.IsTrue(blocks.IsUnderConstruction());
            NUnit.Framework.Assert.IsFalse(blocks.IsLastBlockComplete());
            lastBlock = blocks.GetLastLocatedBlock();
            NUnit.Framework.Assert.AreEqual(Blocksize * 2, lastBlock.GetStartOffset());
            NUnit.Framework.Assert.AreEqual(Blocksize - 1, lastBlock.GetBlockSize());
            @out.Close();
        }
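
The offset and length arguments to DFSClientAdapter.CallGetBlockLocations select which blocks come back. The sketch below shows how a byte range maps onto fixed-size blocks, consistent with the counts asserted in this test; BlocksForRange is a hypothetical helper, and its length == 0 behavior (return only the block containing the offset) is inferred from the Blocksize/0 call above rather than from a documented contract.

using System;

static class BlockRangeSketch
{
    // Returns the index of the first block and the number of blocks that the
    // byte range [offset, offset + length) touches, for fixed-size blocks.
    public static (int First, int Count) BlocksForRange(
        long fileLen, long blockSize, long offset, long length)
    {
        int first = (int)(offset / blockSize);
        if (length == 0)
        {
            // Inferred from the test: a zero-length request still returns the
            // block containing the offset.
            return (first, 1);
        }
        long end = (length > fileLen - offset) ? fileLen : offset + length; // exclusive
        int last = (int)((end - 1) / blockSize);
        return (first, last - first + 1);
    }

    public static void Main()
    {
        const long B = 1024;
        // Snapshot s2 file: length 2*B - 1 spans two blocks, as asserted above.
        Console.WriteLine(BlocksForRange(2 * B - 1, B, 0, long.MaxValue)); // (0, 2)
        // Offset B with length 0 yields one block, matching blockList.Count == 1.
        Console.WriteLine(BlocksForRange(2 * B - 1, B, B, 0));             // (1, 1)
    }
}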