/// <summary>
/// Writes one key/value pair through the wrapped RecordWriter while charging
/// the bytes produced by that write to the file-output byte counter and
/// bumping the reduce output record counter by one.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public virtual void Write(K key, V value)
{
    // Snapshot filesystem byte statistics around the delegated write so the
    // counter is incremented by exactly what this single write produced.
    long before = GetOutputBytes(fsStats);
    real.Write(key, value);
    long after = GetOutputBytes(fsStats);
    fileOutputByteCounter.Increment(after - before);
    reduceOutputCounter.Increment(1);
}
/// <summary>
/// Verifies counter semantics over many randomized rounds: SetValue
/// initializes the counter exactly, Increment accumulates correctly across
/// repeated updates, and a later SetValue overwrites the accumulated value.
/// </summary>
public virtual void TestCounterValue()
{
    Counters counters = new Counters();
    int rounds = 100;
    int incrementsPerRound = 10;
    Random rand = new Random();
    for (int round = 0; round < rounds; round++)
    {
        // Start each round from a fresh random value and track the value the
        // counter is expected to hold after every operation.
        long expected = rand.Next();
        Counters.Counter counter = counters.FindCounter("foo", "bar");
        counter.SetValue(expected);
        NUnit.Framework.Assert.AreEqual("Counter value is not initialized correctly", expected, counter.GetValue());
        for (int step = 0; step < incrementsPerRound; step++)
        {
            int delta = rand.Next();
            counter.Increment(delta);
            expected += delta;
            NUnit.Framework.Assert.AreEqual("Counter value is not incremented correctly", expected, counter.GetValue());
        }
        // A SetValue after increments must replace, not add to, the total.
        expected = rand.Next();
        counter.SetValue(expected);
        NUnit.Framework.Assert.AreEqual("Counter value is not set correctly", expected, counter.GetValue());
    }
}
/// <summary>
/// Drives the reduce phase using the old (mapred) API: instantiates the
/// configured Reducer, wraps the output in a tracking RecordWriter, groups
/// the merged sorted input by key, and feeds each key group to the reducer.
/// Fix: the generic type parameters were declared as Inkey/Invalue/Outkey/Outvalue
/// while the body referenced INKEY/INVALUE/OUTKEY/OUTVALUE — C# identifiers are
/// case-sensitive, so the declaration is renamed to match the usages (type
/// arguments are positional, so callers are unaffected).
/// </summary>
/// <exception cref="System.IO.IOException"/>
private void RunOldReducer<INKEY, INVALUE, OUTKEY, OUTVALUE>(JobConf job, TaskUmbilicalProtocol umbilical, Task.TaskReporter reporter, RawKeyValueIterator rIter, RawComparator<INKEY> comparator)
{
    System.Type keyClass = typeof(INKEY);
    System.Type valueClass = typeof(INVALUE);
    Reducer<INKEY, INVALUE, OUTKEY, OUTVALUE> reducer = ReflectionUtils.NewInstance(job.GetReducerClass(), job);
    // make output collector
    string finalName = GetOutputName(GetPartition());
    RecordWriter<OUTKEY, OUTVALUE> @out = new ReduceTask.OldTrackingRecordWriter<OUTKEY, OUTVALUE>(this, job, reporter, finalName);
    RecordWriter<OUTKEY, OUTVALUE> finalOut = @out;
    OutputCollector<OUTKEY, OUTVALUE> collector = new _OutputCollector_419(finalOut, reporter);
    // indicate that progress update needs to be sent
    // apply reduce function
    try
    {
        // increment processed counter only if skipping feature is enabled
        bool incrProcCount = SkipBadRecords.GetReducerMaxSkipGroups(job) > 0 && SkipBadRecords.GetAutoIncrReducerProcCount(job);
        // Choose the skipping iterator when bad-record skipping is active,
        // otherwise the plain grouping iterator with the job's grouping comparator.
        ReduceTask.ReduceValuesIterator<INKEY, INVALUE> values = IsSkipping()
            ? new ReduceTask.SkippingReduceValuesIterator<INKEY, INVALUE>(this, rIter, comparator, keyClass, valueClass, job, reporter, umbilical)
            : new ReduceTask.ReduceValuesIterator<INKEY, INVALUE>(this, rIter, job.GetOutputValueGroupingComparator(), keyClass, valueClass, job, reporter);
        values.InformReduceProgress();
        while (values.More())
        {
            reduceInputKeyCounter.Increment(1);
            reducer.Reduce(values.GetKey(), values, collector, reporter);
            if (incrProcCount)
            {
                reporter.IncrCounter(SkipBadRecords.CounterGroup, SkipBadRecords.CounterReduceProcessedGroups, 1);
            }
            values.NextKey();
            values.InformReduceProgress();
        }
        // Normal termination: close and null the resources so the finally
        // block's cleanup becomes a no-op for what was already released.
        reducer.Close();
        reducer = null;
        @out.Close(reporter);
        @out = null;
    }
    finally
    {
        IOUtils.Cleanup(Log, reducer);
        CloseQuietly(@out, reporter);
    }
}
/// <summary>
/// Lazily opens this segment's IFile reader on first call — seeking to the
/// segment offset and wrapping the stream for decryption when configured —
/// and, when a map-outputs counter is present, increments it by one.
/// </summary>
/// <exception cref="System.IO.IOException"/>
internal virtual void Init(Counters.Counter readsCounter)
{
    if (reader == null)
    {
        FSDataInputStream stream = fs.Open(file);
        stream.Seek(segmentOffset);
        stream = CryptoUtils.WrapIfNecessary(conf, stream);
        // Exclude any crypto padding from the readable segment length.
        long dataLength = segmentLength - CryptoUtils.CryptoPadding(conf);
        reader = new IFile.Reader<K, V>(conf, stream, dataLength, codec, readsCounter);
    }
    if (mapOutputsCounter != null)
    {
        mapOutputsCounter.Increment(1);
    }
}
/// <summary>
/// Finalizes the IFile output stream: closes the key/value serializers (when
/// present), writes the end-of-file marker, flushes and finalizes the
/// compression/checksum layers, records the compressed byte count, and
/// updates the written-records counter. The statement order is part of the
/// on-disk format and must not be changed.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public virtual void Close()
{
    // When IFile writer is created by BackupStore, we do not have
    // Key and Value classes set. So, check before closing the
    // serializers
    if (keyClass != null)
    {
        keySerializer.Close();
        valueSerializer.Close();
    }
    // Write EOF_MARKER for key/value length (one marker each for the key
    // length and the value length slots).
    WritableUtils.WriteVInt(@out, EofMarker);
    WritableUtils.WriteVInt(@out, EofMarker);
    decompressedBytesWritten += 2 * WritableUtils.GetVIntSize(EofMarker);
    // Flush the stream
    @out.Flush();
    if (compressOutput)
    {
        // Flush data through the compressor and reset it so the pooled
        // instance is reusable for a subsequent stream.
        compressedOut.Finish();
        compressedOut.ResetState();
    }
    // Close the underlying stream iff we own it...
    if (ownOutputStream)
    {
        @out.Close();
    }
    else
    {
        // ...otherwise just finalize the checksum layer; the caller keeps
        // ownership of the underlying stream.
        checksumOut.Finish();
    }
    // Compressed size is measured by how far the raw stream position has
    // advanced since this writer started at 'start'.
    compressedBytesWritten = rawOut.GetPos() - start;
    if (compressOutput)
    {
        // Return back the compressor to the pool.
        CodecPool.ReturnCompressor(compressor);
        compressor = null;
    }
    @out = null;
    if (writtenRecordsCounter != null)
    {
        writtenRecordsCounter.Increment(numRecordsWritten);
    }
}
/// <summary>
/// Closes the IFile reader: closes the underlying input stream, releases the
/// read buffers, updates the read-records counter when present, and returns
/// any borrowed decompressor to the codec pool.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public virtual void Close()
{
    // Close the underlying stream
    @in.Close();
    // Release the buffer references so they can be garbage collected.
    dataIn = null;
    buffer = null;
    if (readRecordsCounter != null)
    {
        readRecordsCounter.Increment(numRecordsRead);
    }
    // Reset the decompressor before returning it so the pooled instance is
    // clean for the next borrower.
    if (decompressor != null)
    {
        decompressor.Reset();
        CodecPool.ReturnDecompressor(decompressor);
        decompressor = null;
    }
}
/// <summary>
/// Builds a counter-tracking wrapper around the job's configured
/// RecordWriter. Filesystem statistics are captured only for file-based
/// output formats, and any bytes written while the real writer is being
/// constructed are charged to the file-output byte counter.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public OldTrackingRecordWriter(ReduceTask reduce, JobConf job, Task.TaskReporter reporter, string finalName)
{
    this.reduceOutputCounter = reduce.reduceOutputCounter;
    this.fileOutputByteCounter = reduce.fileOutputByteCounter;
    // Per-filesystem statistics are only resolvable for FileOutputFormat,
    // where an output path exists to match against.
    IList<FileSystem.Statistics> stats = null;
    if (job.GetOutputFormat() is FileOutputFormat)
    {
        stats = GetFsStatistics(FileOutputFormat.GetOutputPath(job), job);
    }
    fsStats = stats;
    FileSystem fileSystem = FileSystem.Get(job);
    // Snapshot byte statistics around writer construction so any bytes it
    // emits while being created are accounted for.
    long before = GetOutputBytes(fsStats);
    this.real = job.GetOutputFormat().GetRecordWriter(fileSystem, job, finalName, reporter);
    long after = GetOutputBytes(fsStats);
    fileOutputByteCounter.Increment(after - before);
}