Example #1
 private RemoteBlockReader(string file, string bpid, long blockId, DataInputStream @in,
                           DataChecksum checksum, bool verifyChecksum, long startOffset,
                           long firstChunkOffset, long bytesToRead, Peer peer,
                           DatanodeID datanodeID, PeerCache peerCache)
     : base(new Path("/" + Block.BlockFilePrefix + blockId + ":" + bpid + ":of:" + file),
            1, verifyChecksum, checksum.GetChecksumSize() > 0 ? checksum : null,
            checksum.GetBytesPerChecksum(), checksum.GetChecksumSize())
 {
     // Path is used only for printing block and file information in debug
     /*too non path-like?*/
     this.isLocal = DFSClient.IsLocalAddress(NetUtils.CreateSocketAddr(datanodeID.GetXferAddr()));
     this.peer        = peer;
     this.datanodeID  = datanodeID;
     this.@in         = @in;
     this.checksum    = checksum;
     this.startOffset = Math.Max(startOffset, 0);
     this.blockId     = blockId;
     // The total number of bytes that we need to transfer from the DN is
     // the amount that the user wants (bytesToRead), plus the padding at
     // the beginning in order to chunk-align. Note that the DN may elect
     // to send more than this amount if the read starts/ends mid-chunk.
     this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
     this.firstChunkOffset    = firstChunkOffset;
     lastChunkOffset          = firstChunkOffset;
     lastChunkLen             = -1;
     bytesPerChecksum         = this.checksum.GetBytesPerChecksum();
     checksumSize             = this.checksum.GetChecksumSize();
     this.peerCache           = peerCache;
 }
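A minimal sketch of the chunk-alignment arithmetic described in the comment above, using made-up numbers (512-byte checksum chunks, a 10,000-byte read starting at block offset 1,234); none of these values come from the example itself.

 // Hypothetical numbers only; illustrates why the padding term is added.
 static long BytesNeededToFinishExample()
 {
     long bytesPerChecksum = 512;     // checksum chunk size
     long startOffset      = 1234;    // where the caller wants to start reading
     long bytesToRead      = 10000;   // how much the caller asked for
     long firstChunkOffset = (startOffset / bytesPerChecksum) * bytesPerChecksum;  // 1024
     // Padding at the front so the transfer starts on a chunk boundary: 1234 - 1024 = 210
     return bytesToRead + (startOffset - firstChunkOffset);                        // 10210
 }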
Example #2
        /// <summary>Test that the client respects its keepalive timeout.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestClientResponsesKeepAliveTimeout()
        {
            Configuration clientConf = new Configuration(conf);
            // Set a client socket cache expiry time much shorter than
            // the datanode-side expiration time.
            long ClientExpiryMs = 10L;

            clientConf.SetLong(DFSConfigKeys.DfsClientSocketCacheExpiryMsecKey, ClientExpiryMs);
            clientConf.Set(DFSConfigKeys.DfsClientContext, "testClientResponsesKeepAliveTimeout");
            DistributedFileSystem fs =
                (DistributedFileSystem)FileSystem.Get(cluster.GetURI(), clientConf);
            PeerCache peerCache = ClientContext.GetFromConf(clientConf).GetPeerCache();

            DFSTestUtil.CreateFile(fs, TestFile, 1L, (short)1, 0L);
            // Clients that write aren't currently re-used.
            NUnit.Framework.Assert.AreEqual(0, peerCache.Size());
            AssertXceiverCount(0);
            // Reads the file, so we should get a
            // cached socket, and should have an xceiver on the other side.
            DFSTestUtil.ReadFile(fs, TestFile);
            NUnit.Framework.Assert.AreEqual(1, peerCache.Size());
            AssertXceiverCount(1);
            // Sleep for a bit longer than the client keepalive timeout.
            Sharpen.Thread.Sleep(ClientExpiryMs + 50);
            // Taking out a peer which is expired should give a null.
            Peer peer = peerCache.Get(dn.GetDatanodeId(), false);

            NUnit.Framework.Assert.IsTrue(peer == null);
            // The socket cache is now empty.
            NUnit.Framework.Assert.AreEqual(0, peerCache.Size());
        }
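A hedged sketch of the caller-side pattern this test exercises: a null from PeerCache.Get means nothing usable is cached (either empty, or the entry sat idle past the expiry), so the caller dials a fresh connection. NewTcpPeer is a hypothetical placeholder for however the client actually connects; it is not part of the example above.

            Peer cached = peerCache.Get(dn.GetDatanodeId(), false);
            if (cached == null)
            {
                // Nothing cached, or the cached peer outlived
                // DfsClientSocketCacheExpiryMsecKey and was dropped: connect again.
                cached = NewTcpPeer(dn.GetDatanodeId());   // hypothetical helper
            }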
Example #3
        public virtual void TestMultiplePeersWithSameKey()
        {
            int        Capacity = 3;
            PeerCache  cache    = new PeerCache(Capacity, 100000);
            DatanodeID dnId     = new DatanodeID("192.168.0.1", "fakehostname", "fake_datanode_id",
                                                 100, 101, 102, 103);
            HashMultiset<TestPeerCache.FakePeer> peers = HashMultiset.Create(Capacity);

            for (int i = 0; i < Capacity; ++i)
            {
                TestPeerCache.FakePeer peer = new TestPeerCache.FakePeer(dnId, false);
                peers.AddItem(peer);
                cache.Put(dnId, peer);
            }
            // Check that all of the peers ended up in the cache
            NUnit.Framework.Assert.AreEqual(Capacity, cache.Size());
            while (!peers.IsEmpty())
            {
                Peer peer = cache.Get(dnId, false);
                NUnit.Framework.Assert.IsTrue(peer != null);
                NUnit.Framework.Assert.IsTrue(!peer.IsClosed());
                peers.Remove(peer);
            }
            NUnit.Framework.Assert.AreEqual(0, cache.Size());
            cache.Close();
        }
Example #4
        public virtual void TestEviction()
        {
            int       Capacity = 3;
            PeerCache cache    = new PeerCache(Capacity, 100000);

            DatanodeID[]             dnIds = new DatanodeID[Capacity + 1];
            TestPeerCache.FakePeer[] peers = new TestPeerCache.FakePeer[Capacity + 1];
            for (int i = 0; i < dnIds.Length; ++i)
            {
                dnIds[i] = new DatanodeID("192.168.0.1", "fakehostname_" + i, "fake_datanode_id_" + i,
                                          100, 101, 102, 103);
                peers[i] = new TestPeerCache.FakePeer(dnIds[i], false);
            }
            for (int i_1 = 0; i_1 < Capacity; ++i_1)
            {
                cache.Put(dnIds[i_1], peers[i_1]);
            }
            // Check that the peers are cached
            NUnit.Framework.Assert.AreEqual(Capacity, cache.Size());
            // Add another entry and check that the first entry was evicted
            cache.Put(dnIds[Capacity], peers[Capacity]);
            NUnit.Framework.Assert.AreEqual(Capacity, cache.Size());
            NUnit.Framework.Assert.AreSame(null, cache.Get(dnIds[0], false));
            // Make sure that the other entries are still there
            for (int i_2 = 1; i_2 < Capacity; ++i_2)
            {
                Peer peer = cache.Get(dnIds[i_2], false);
                NUnit.Framework.Assert.AreSame(peers[i_2], peer);
                NUnit.Framework.Assert.IsTrue(!peer.IsClosed());
                peer.Close();
            }
            NUnit.Framework.Assert.AreEqual(1, cache.Size());
            cache.Close();
        }
Example #5
        public virtual void TestExpiry()
        {
            int       Capacity     = 3;
            int       ExpiryPeriod = 10;
            PeerCache cache        = new PeerCache(Capacity, ExpiryPeriod);

            DatanodeID[]             dnIds = new DatanodeID[Capacity];
            TestPeerCache.FakePeer[] peers = new TestPeerCache.FakePeer[Capacity];
            for (int i = 0; i < Capacity; ++i)
            {
                dnIds[i] = new DatanodeID("192.168.0.1", "fakehostname_" + i, "fake_datanode_id",
                                          100, 101, 102, 103);
                peers[i] = new TestPeerCache.FakePeer(dnIds[i], false);
            }
            for (int i_1 = 0; i_1 < Capacity; ++i_1)
            {
                cache.Put(dnIds[i_1], peers[i_1]);
            }
            // Wait for the peers to expire
            Sharpen.Thread.Sleep(ExpiryPeriod * 50);
            NUnit.Framework.Assert.AreEqual(0, cache.Size());
            // make sure that the peers were closed when they were expired
            for (int i_2 = 0; i_2 < Capacity; ++i_2)
            {
                NUnit.Framework.Assert.IsTrue(peers[i_2].IsClosed());
            }
            // sleep for another second and see if
            // the daemon thread runs fine on empty cache
            Sharpen.Thread.Sleep(ExpiryPeriod * 50);
            cache.Close();
        }
Example #6
        public virtual void TestAddAndRetrieve()
        {
            PeerCache  cache = new PeerCache(3, 100000);
            DatanodeID dnId  = new DatanodeID("192.168.0.1", "fakehostname", "fake_datanode_id",
                                              100, 101, 102, 103);

            TestPeerCache.FakePeer peer = new TestPeerCache.FakePeer(dnId, false);
            cache.Put(dnId, peer);
            NUnit.Framework.Assert.IsTrue(!peer.IsClosed());
            NUnit.Framework.Assert.AreEqual(1, cache.Size());
            NUnit.Framework.Assert.AreEqual(peer, cache.Get(dnId, false));
            NUnit.Framework.Assert.AreEqual(0, cache.Size());
            cache.Close();
        }
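As the assertions above show, Get checks the peer out of the cache (Size drops back to 0), so a caller that wants the connection reused has to Put it back when finished. A minimal sketch of that check-out/check-in pattern, reusing the cache and dnId names from this test:

            Peer p = cache.Get(dnId, false);     // removes the entry from the cache
            if (p != null)
            {
                try
                {
                    // ... use the connection for a read ...
                }
                finally
                {
                    cache.Put(dnId, p);          // return it so the next reader can reuse it
                }
            }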
Example #7
 private ClientContext(string name, DFSClient.Conf conf)
 {
     this.name              = name;
     this.confString        = ConfAsString(conf);
     this.shortCircuitCache = new ShortCircuitCache(conf.shortCircuitStreamsCacheSize,
                                                    conf.shortCircuitStreamsCacheExpiryMs,
                                                    conf.shortCircuitMmapCacheSize,
                                                    conf.shortCircuitMmapCacheExpiryMs,
                                                    conf.shortCircuitMmapCacheRetryTimeout,
                                                    conf.shortCircuitCacheStaleThresholdMs,
                                                    conf.shortCircuitSharedMemoryWatcherInterruptCheckMs);
     this.peerCache                 = new PeerCache(conf.socketCacheCapacity, conf.socketCacheExpiry);
     this.keyProviderCache          = new KeyProviderCache(conf.keyProviderCacheExpiryMs);
     this.useLegacyBlockReaderLocal = conf.useLegacyBlockReaderLocal;
     this.domainSocketFactory       = new DomainSocketFactory(conf);
     this.byteArrayManager          = ByteArrayManager.NewInstance(conf.writeByteArrayManagerConf);
 }
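A hedged sketch of how the tests below reach this constructor: the PeerCache hangs off a named ClientContext, and each keepalive test sets its own DfsClientContext value precisely so it gets an isolated cache. Assuming contexts are shared by name, two Configurations that set the same name should see the same PeerCache; the context name here is made up.

     Configuration confA = new Configuration(conf);                      // "conf" as in the tests below
     confA.Set(DFSConfigKeys.DfsClientContext, "shared-context-name");   // hypothetical name
     PeerCache sharedCache = ClientContext.GetFromConf(confA).GetPeerCache();
     // A second Configuration with the same DfsClientContext value is expected
     // to resolve to the same ClientContext, and therefore the same PeerCache.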
Example #8
        /// <summary>Regression test for HDFS-3357.</summary>
        /// <remarks>
        /// Regression test for HDFS-3357. Check that the datanode is respecting
        /// its configured keepalive timeout.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestDatanodeRespectsKeepAliveTimeout()
        {
            Configuration clientConf = new Configuration(conf);
            // Set a client socket cache expiry time much longer than
            // the datanode-side expiration time.
            long ClientExpiryMs = 60000L;

            clientConf.SetLong(DFSConfigKeys.DfsClientSocketCacheExpiryMsecKey, ClientExpiryMs);
            clientConf.Set(DFSConfigKeys.DfsClientContext, "testDatanodeRespectsKeepAliveTimeout");
            DistributedFileSystem fs =
                (DistributedFileSystem)FileSystem.Get(cluster.GetURI(), clientConf);
            PeerCache peerCache = ClientContext.GetFromConf(clientConf).GetPeerCache();

            DFSTestUtil.CreateFile(fs, TestFile, 1L, (short)1, 0L);
            // Clients that write aren't currently re-used.
            NUnit.Framework.Assert.AreEqual(0, peerCache.Size());
            AssertXceiverCount(0);
            // Reads the file, so we should get a
            // cached socket, and should have an xceiver on the other side.
            DFSTestUtil.ReadFile(fs, TestFile);
            NUnit.Framework.Assert.AreEqual(1, peerCache.Size());
            AssertXceiverCount(1);
            // Sleep for a bit longer than the keepalive timeout
            // and make sure the xceiver died.
            Sharpen.Thread.Sleep(DFSConfigKeys.DfsDatanodeSocketReuseKeepaliveDefault + 50);
            AssertXceiverCount(0);
            // The socket is still in the cache, because we don't
            // notice that it's closed until we try to read
            // from it again.
            NUnit.Framework.Assert.AreEqual(1, peerCache.Size());
            // Take it out of the cache - reading should
            // give an EOF.
            Peer peer = peerCache.Get(dn.GetDatanodeId(), false);

            NUnit.Framework.Assert.IsNotNull(peer);
            NUnit.Framework.Assert.AreEqual(-1, peer.GetInputStream().Read());
        }
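A hedged sketch of what the last two assertions mean for a caller: a cached peer may already have been closed by the datanode's keepalive timeout, and the client only notices via EOF when it next reads, at which point the peer should be closed and replaced.

            Peer candidate = peerCache.Get(dn.GetDatanodeId(), false);
            if (candidate != null && candidate.GetInputStream().Read() == -1)
            {
                // The datanode side timed out and closed the socket; discard it.
                // (Test-style probe: real readers simply hit the EOF on their next request.)
                candidate.Close();
                candidate = null;   // caller falls back to dialing a fresh connection
            }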
Example #9
        /// <exception cref="System.Exception"/>
        public virtual void TestManyClosedSocketsInCache()
        {
            // Make a small file
            Configuration clientConf = new Configuration(conf);

            clientConf.Set(DFSConfigKeys.DfsClientContext, "testManyClosedSocketsInCache");
            DistributedFileSystem fs =
                (DistributedFileSystem)FileSystem.Get(cluster.GetURI(), clientConf);
            PeerCache peerCache = ClientContext.GetFromConf(clientConf).GetPeerCache();

            DFSTestUtil.CreateFile(fs, TestFile, 1L, (short)1, 0L);
            // Insert a bunch of dead sockets in the cache, by opening
            // many streams concurrently, reading all of the data,
            // and then closing them.
            InputStream[] stms = new InputStream[5];
            try
            {
                for (int i = 0; i < stms.Length; i++)
                {
                    stms[i] = fs.Open(TestFile);
                }
                foreach (InputStream stm in stms)
                {
                    IOUtils.CopyBytes(stm, new IOUtils.NullOutputStream(), 1024);
                }
            }
            finally
            {
                IOUtils.Cleanup(null, stms);
            }
            NUnit.Framework.Assert.AreEqual(5, peerCache.Size());
            // Let all the xceivers timeout
            Sharpen.Thread.Sleep(1500);
            AssertXceiverCount(0);
            // Client side still has the sockets cached
            NUnit.Framework.Assert.AreEqual(5, peerCache.Size());
            // Reading should not throw an exception.
            DFSTestUtil.ReadFile(fs, TestFile);
        }
Example #10
        public virtual void TestDomainSocketPeers()
        {
            int        Capacity = 3;
            PeerCache  cache    = new PeerCache(Capacity, 100000);
            DatanodeID dnId     = new DatanodeID("192.168.0.1", "fakehostname", "fake_datanode_id",
                                                 100, 101, 102, 103);
            HashMultiset<TestPeerCache.FakePeer> peers = HashMultiset.Create(Capacity);

            for (int i = 0; i < Capacity; ++i)
            {
                TestPeerCache.FakePeer peer = new TestPeerCache.FakePeer(dnId, i == Capacity - 1);
                peers.AddItem(peer);
                cache.Put(dnId, peer);
            }
            // Check that all of the peers ended up in the cache
            NUnit.Framework.Assert.AreEqual(Capacity, cache.Size());
            // Test that get(requireDomainPeer=true) finds the peer with the
            // domain socket.
            Peer peer_1 = cache.Get(dnId, true);

            NUnit.Framework.Assert.IsTrue(peer_1.GetDomainSocket() != null);
            peers.Remove(peer_1);
            // Test that get(requireDomainPeer=true) returns null when there are
            // no more peers with domain sockets.
            peer_1 = cache.Get(dnId, true);
            NUnit.Framework.Assert.IsTrue(peer_1 == null);
            // Check that all of the other peers ended up in the cache.
            while (!peers.IsEmpty())
            {
                peer_1 = cache.Get(dnId, false);
                NUnit.Framework.Assert.IsTrue(peer_1 != null);
                NUnit.Framework.Assert.IsTrue(!peer_1.IsClosed());
                peers.Remove(peer_1);
            }
            NUnit.Framework.Assert.AreEqual(0, cache.Size());
            cache.Close();
        }
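A hedged sketch of the lookup order this test implies: ask the cache for a domain-socket peer first and fall back to any cached peer (plain TCP included) when none is left, reusing the cache and dnId names from the test.

            Peer preferred = cache.Get(dnId, true);    // only returns peers that have a domain socket
            if (preferred == null)
            {
                preferred = cache.Get(dnId, false);    // any cached peer, domain socket or not
            }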
Example #11
 protected internal RemoteBlockReader2(string file, string bpid, long blockId,
                                       DataChecksum checksum, bool verifyChecksum,
                                       long startOffset, long firstChunkOffset, long bytesToRead,
                                       Peer peer, DatanodeID datanodeID, PeerCache peerCache)
 {
     this.isLocal = DFSClient.IsLocalAddress(NetUtils.CreateSocketAddr(datanodeID.GetXferAddr()));
     // Path is used only for printing block and file information in debug
     this.peer           = peer;
     this.datanodeID     = datanodeID;
     this.@in            = peer.GetInputStreamChannel();
     this.checksum       = checksum;
     this.verifyChecksum = verifyChecksum;
     this.startOffset    = Math.Max(startOffset, 0);
     this.filename       = file;
     this.peerCache      = peerCache;
     this.blockId        = blockId;
     // The total number of bytes that we need to transfer from the DN is
     // the amount that the user wants (bytesToRead), plus the padding at
     // the beginning in order to chunk-align. Note that the DN may elect
     // to send more than this amount if the read starts/ends mid-chunk.
     this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
     bytesPerChecksum         = this.checksum.GetBytesPerChecksum();
     checksumSize             = this.checksum.GetChecksumSize();
 }
Example #12
 // Sharpen-generated constructor for the anonymous Runnable declared inside PeerCache
 // (presumably the daemon that expires idle cache entries); it only captures a
 // reference to the enclosing cache instance.
 public _Runnable_120(PeerCache _enclosing)
 {
     this._enclosing = _enclosing;
 }
Example #13
        /// <summary>Create a new BlockReader specifically to satisfy a read.</summary>
        /// <remarks>
        /// Create a new BlockReader specifically to satisfy a read.
        /// This method also sends the OP_READ_BLOCK request.
        /// </remarks>
        /// <param name="file">File location</param>
        /// <param name="block">The block object</param>
        /// <param name="blockToken">The block token for security</param>
        /// <param name="startOffset">The read offset, relative to block head</param>
        /// <param name="len">The number of bytes to read</param>
        /// <param name="bufferSize">The IO buffer size (not the client buffer size)</param>
        /// <param name="verifyChecksum">Whether to verify checksum</param>
        /// <param name="clientName">Client name</param>
        /// <returns>New BlockReader instance, or null on error.</returns>
        /// <exception cref="System.IO.IOException"/>
        public static Org.Apache.Hadoop.Hdfs.RemoteBlockReader NewBlockReader(string file,
            ExtendedBlock block, Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> blockToken,
            long startOffset, long len, int bufferSize, bool verifyChecksum, string clientName,
            Peer peer, DatanodeID datanodeID, PeerCache peerCache, CachingStrategy cachingStrategy)
        {
            // in and out will be closed when sock is closed (by the caller)
            DataOutputStream @out =
                new DataOutputStream(new BufferedOutputStream(peer.GetOutputStream()));

            new Sender(@out).ReadBlock(block, blockToken, clientName, startOffset, len,
                                       verifyChecksum, cachingStrategy);
            //
            // Get bytes in block, set streams
            //
            DataInputStream @in =
                new DataInputStream(new BufferedInputStream(peer.GetInputStream(), bufferSize));

            DataTransferProtos.BlockOpResponseProto status =
                DataTransferProtos.BlockOpResponseProto.ParseFrom(PBHelper.VintPrefixed(@in));
            RemoteBlockReader2.CheckSuccess(status, peer, block, file);
            DataTransferProtos.ReadOpChecksumInfoProto checksumInfo = status.GetReadOpChecksumInfo();
            DataChecksum checksum = DataTransferProtoUtil.FromProto(checksumInfo.GetChecksum());
            // Warning when we get CHECKSUM_NULL?
            // Read the first chunk offset.
            long firstChunkOffset = checksumInfo.GetChunkOffset();

            if (firstChunkOffset < 0 || firstChunkOffset > startOffset ||
                firstChunkOffset <= (startOffset - checksum.GetBytesPerChecksum()))
            {
                throw new IOException("BlockReader: error in first chunk offset (" + firstChunkOffset
                                      + ") startOffset is " + startOffset + " for file " + file);
            }
            return new Org.Apache.Hadoop.Hdfs.RemoteBlockReader(file, block.GetBlockPoolId(),
                block.GetBlockId(), @in, checksum, verifyChecksum, startOffset, firstChunkOffset,
                len, peer, datanodeID, peerCache);
        }