/// <summary>Calculate the usable size of a shared memory segment.</summary>
/// <remarks>
/// Calculate the usable size of a shared memory segment.
/// We round down to a multiple of the slot size and do some validation.
/// </remarks>
/// <param name="stream">The stream we're using.</param>
/// <returns>The usable size of the shared memory segment.</returns>
/// <exception cref="System.IO.IOException"/>
private static int GetUsableLength(FileInputStream stream)
{
    int intSize = Ints.CheckedCast(stream.GetChannel().Size());
    int slots = intSize / BytesPerSlot;
    if (slots == 0)
    {
        throw new IOException("size of shared memory segment was " + intSize + ", but that is not enough to hold even one slot.");
    }
    return slots * BytesPerSlot;
}
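// A hedged worked example of the round-down logic above. The segment size and
// slot size are made-up values for illustration, not read from ShortCircuitShm:
//
//   int intSize = 4100;                  // hypothetical segment size in bytes
//   int bytesPerSlot = 64;               // assumed slot size
//   int slots = intSize / bytesPerSlot;  // integer division rounds down to 64 slots
//   int usable = slots * bytesPerSlot;   // 4096; the trailing 4 bytes are unusable
//
// A 48-byte segment would yield 0 slots and trigger the IOException instead.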
public TopConf(Configuration conf)
{
    isEnabled = conf.GetBoolean(DFSConfigKeys.NntopEnabledKey, DFSConfigKeys.NntopEnabledDefault);
    string[] periodsStr = conf.GetTrimmedStrings(DFSConfigKeys.NntopWindowsMinutesKey, DFSConfigKeys.NntopWindowsMinutesDefault);
    nntopReportingPeriodsMs = new int[periodsStr.Length];
    for (int i = 0; i < periodsStr.Length; i++)
    {
        // Convert each configured reporting window from minutes to milliseconds.
        nntopReportingPeriodsMs[i] = Ints.CheckedCast(TimeUnit.Minutes.ToMillis(System.Convert.ToInt32(periodsStr[i])));
    }
    foreach (int aPeriodMs in nntopReportingPeriodsMs)
    {
        Preconditions.CheckArgument(aPeriodMs >= TimeUnit.Minutes.ToMillis(1), "minimum reporting period is 1 min!");
    }
}
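// A hedged usage sketch: building a TopConf from a Configuration that sets the
// reporting windows explicitly. Only the key constants above are assumed to
// exist; the literal values here are illustrative.
//
//   Configuration conf = new Configuration();
//   conf.SetBoolean(DFSConfigKeys.NntopEnabledKey, true);
//   conf.Set(DFSConfigKeys.NntopWindowsMinutesKey, "1,5,25");
//   TopConf topConf = new TopConf(conf);
//   // topConf.nntopReportingPeriodsMs is now { 60000, 300000, 1500000 }.
//
// A window shorter than one minute (e.g. "0") would fail the precondition above.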
/// <exception cref="System.Exception"/>
public virtual void TestReCacheAfterUncache()
{
    int TotalBlocksPerCache = Ints.CheckedCast(CacheCapacity / BlockSize);
    BlockReaderTestUtil.EnableHdfsCachingTracing();
    NUnit.Framework.Assert.AreEqual(0, CacheCapacity % BlockSize);
    // Create a small file
    Path SmallFile = new Path("/smallFile");
    DFSTestUtil.CreateFile(fs, SmallFile, BlockSize, (short)1, unchecked((int)(0xcafe)));
    // Create a file that will take up the whole cache
    Path BigFile = new Path("/bigFile");
    DFSTestUtil.CreateFile(fs, BigFile, TotalBlocksPerCache * BlockSize, (short)1, unchecked((int)(0xbeef)));
    DistributedFileSystem dfs = cluster.GetFileSystem();
    dfs.AddCachePool(new CachePoolInfo("pool"));
    long bigCacheDirectiveId = dfs.AddCacheDirective(new CacheDirectiveInfo.Builder().SetPool("pool").SetPath(BigFile).SetReplication((short)1).Build());
    GenericTestUtils.WaitFor(new _Supplier_532(TotalBlocksPerCache), 1000, 30000);
    // Try to cache a smaller file. It should fail.
    long shortCacheDirectiveId = dfs.AddCacheDirective(new CacheDirectiveInfo.Builder().SetPool("pool").SetPath(SmallFile).SetReplication((short)1).Build());
    Sharpen.Thread.Sleep(10000);
    MetricsRecordBuilder dnMetrics = MetricsAsserts.GetMetrics(dn.GetMetrics().Name());
    NUnit.Framework.Assert.AreEqual(TotalBlocksPerCache, MetricsAsserts.GetLongCounter("BlocksCached", dnMetrics));
    // Uncache the big file and verify that the small file can now be
    // cached (regression test for HDFS-6107)
    dfs.RemoveCacheDirective(bigCacheDirectiveId);
    GenericTestUtils.WaitFor(new _Supplier_560(dfs, shortCacheDirectiveId), 1000, 30000);
    dfs.RemoveCacheDirective(shortCacheDirectiveId);
}
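// _Supplier_532 and _Supplier_560 are Sharpen's named translations of the
// anonymous Supplier<Boolean> instances in the original Java test; their
// bodies are not part of this excerpt. A plausible sketch of the first one,
// assuming the test's dn field is static so the nested class can reach it
// (the polling logic here is an assumption, not the original source):
private sealed class _Supplier_532 : Supplier<bool>
{
    private readonly int totalBlocksPerCache;

    public _Supplier_532(int totalBlocksPerCache)
    {
        this.totalBlocksPerCache = totalBlocksPerCache;
    }

    public bool Get()
    {
        // Keep polling until the datanode reports the expected number of cached blocks.
        MetricsRecordBuilder dnMetrics = MetricsAsserts.GetMetrics(dn.GetMetrics().Name());
        return MetricsAsserts.GetLongCounter("BlocksCached", dnMetrics) == totalBlocksPerCache;
    }
}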
/// <summary>Get the slot index.</summary>
/// <returns>The index of this slot.</returns>
public virtual int GetSlotIdx()
{
    return Ints.CheckedCast((this.slotAddress - this._enclosing.baseAddress) / ShortCircuitShm.BytesPerSlot);
}
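// A hedged worked example of the index arithmetic above, with made-up
// addresses and the slot size restated as a local assumption rather than
// read from ShortCircuitShm.BytesPerSlot:
//
//   long baseAddress = 0x1000;
//   long slotAddress = 0x1100;
//   int bytesPerSlot = 64;
//   int slotIdx = Ints.CheckedCast((slotAddress - baseAddress) / bytesPerSlot);
//   // (0x1100 - 0x1000) / 64 = 256 / 64 = 4
//
// Ints.CheckedCast throws if the quotient would overflow an int.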