/// <summary>Performs the aggregate operation on a blobset.</summary>
/// <param name="jobName">The name of the job.</param>
/// <exception cref="InvalidOperationException">If there is no reduced data to aggregate.</exception>
public void PerformAggregate(string jobName)
{
    // 1. Load config
    var config = GetJobConfig(jobName).Value;
    var reducedBlobPrefix = ReducedBlobName.GetPrefix(jobName);
    var aggregateResults = new List<object>();
    Type mapOut = Type.GetType(config.TMapOutType);

    // 2. Load reduced items and do aggregation
    string ignored;
    foreach (var blobName in _blobStorage.ListBlobNames(reducedBlobPrefix))
    {
        var blob = _blobStorage.GetBlob(blobName.ContainerName, blobName.ToString(), mapOut, out ignored);
        if (!blob.HasValue)
        {
            continue;
        }

        aggregateResults.Add(blob.Value);
    }

    // Guard: indexing aggregateResults[0] below would otherwise throw an
    // unhelpful ArgumentOutOfRangeException when no reduced blobs were found
    // (or none could be deserialized).
    if (aggregateResults.Count == 0)
    {
        throw new InvalidOperationException("No reduced data to aggregate for job '" + jobName + "'");
    }

    IMapReduceFunctions mapReduceFunctions = GetMapReduceFunctions(config.MapReduceFunctionsImplementor);

    // Pairwise-fold the results until a single aggregated value remains.
    // NOTE(review): this reuses the reducer delegate for aggregation — confirm
    // a dedicated aggregator from IMapReduceFunctions is not intended here.
    while (aggregateResults.Count > 1)
    {
        object item1 = aggregateResults[0];
        object item2 = aggregateResults[1];
        aggregateResults.RemoveAt(0);
        aggregateResults.RemoveAt(0);

        object aggregResult = InvokeAsDelegate(mapReduceFunctions.GetReducer(), item1, item2);
        aggregateResults.Add(aggregResult);
    }

    // 3. Store aggregated result
    var aggregatedBlobName = AggregatedBlobName.Create(jobName);
    _blobStorage.PutBlob(aggregatedBlobName.ContainerName, aggregatedBlobName.ToString(), aggregateResults[0], mapOut, false, out ignored);

    // 4. Delete reduced data
    _blobStorage.DeleteAllBlobs(reducedBlobPrefix);
}
/// <summary>Pushes a batch of items for processing.</summary>
/// <param name="functions">The functions for map/reduce/aggregate operations.</param>
/// <param name="items">The items to process (at least two).</param>
/// <param name="workerCount">The max number of workers to use.</param>
/// <returns>The batch ID.</returns>
/// <exception cref="InvalidOperationException">If the method was already called.</exception>
/// <exception cref="ArgumentException">If <paramref name="items"/> contains less than two items.</exception>
public string PushItems(IMapReduceFunctions functions, IList<TMapIn> items, int workerCount)
{
    // NOTE(review): locking on a string field is fragile (equal interned strings
    // can share one lock object); a dedicated private object would be safer —
    // confirm _jobName is unique per instance before changing.
    lock (_jobName)
    {
        if (_itemsPushed)
        {
            throw new InvalidOperationException("A batch was already pushed to the work queue");
        }

        var blobSet = new MapReduceBlobSet(_blobStorage, _queueStorage);

        // Box every item up-front; GenerateBlobSets needs an IList<object>
        // whose count is known.
        var boxedItems = new List<object>(items.Count);
        foreach (TMapIn item in items)
        {
            boxedItems.Add(item);
        }

        blobSet.GenerateBlobSets(_jobName, boxedItems, functions, workerCount, typeof(TMapIn), typeof(TMapOut));
        _itemsPushed = true;

        return _jobName;
    }
}
/// <summary>Generates the blob sets that are required to run cloud-based map/reduce operations.</summary>
/// <param name="jobName">The name of the job (should be unique).</param>
/// <param name="items">The items that must be processed (at least two).</param>
/// <param name="functions">The map/reduce/aggregate functions (aggregate is optional).</param>
/// <param name="workerCount">The number of workers to use.</param>
/// <param name="mapIn">The type of the map input.</param>
/// <param name="mapOut">The type of the map output.</param>
/// <remarks>This method should be called from <see cref="T:MapReduceJob"/>.</remarks>
public void GenerateBlobSets(string jobName, IList<object> items, IMapReduceFunctions functions, int workerCount, Type mapIn, Type mapOut)
{
    // Note: items is IList and not IEnumerable because the number of items must be known up-front

    // 1. Store config
    // 2. Put blobs and queue job messages

    int itemCount = items.Count;

    // Cap the number of blobsets so that no blobset is ever empty
    int blobSetCount = Math.Min(workerCount, itemCount);

    string ignored;

    // 1. Store configuration
    var configBlobName = MapReduceConfigurationName.Create(jobName);
    var config = new MapReduceConfiguration()
    {
        TMapInType = mapIn.AssemblyQualifiedName,
        TMapOutType = mapOut.AssemblyQualifiedName,
        MapReduceFunctionsImplementor = functions.GetType().AssemblyQualifiedName,
        BlobSetCount = blobSetCount
    };

    _blobStorage.PutBlob(configBlobName.ContainerName, configBlobName.ToString(), config, typeof(MapReduceConfiguration), false, out ignored);

    // 2.1. Allocate blobsets using a balanced partition.
    // The previous ceiling-based scheme (each non-final set took
    // ceil(itemCount / blobSetCount) items) could over-consume items and leave
    // the final set with a NEGATIVE size: e.g. 5 items across 4 sets gave
    // 2+2+2, leaving 5-6 = -1 for the last set. Handing out
    // itemCount / blobSetCount items per set, plus one extra to the first
    // (itemCount % blobSetCount) sets, always sums to itemCount and keeps
    // every set non-empty.
    var allNames = new InputBlobName[blobSetCount][];
    int baseSetSize = itemCount / blobSetCount;
    int largerSetCount = itemCount % blobSetCount;
    int processedBlobs = 0;
    for (int currSet = 0; currSet < blobSetCount; currSet++)
    {
        int thisSetSize = currSet < largerSetCount ? baseSetSize + 1 : baseSetSize;
        allNames[currSet] = new InputBlobName[thisSetSize];
        processedBlobs += thisSetSize;
    }

    // Sanity check: the partition must cover every item exactly once
    if (processedBlobs != itemCount)
    {
        throw new InvalidOperationException("Processed Blobs are less than the number of items");
    }

    // 2.2. Store input data and queue one job message per blobset
    // (separate cycle for clarity)
    processedBlobs = 0;
    for (int currSet = 0; currSet < blobSetCount; currSet++)
    {
        for (int i = 0; i < allNames[currSet].Length; i++)
        {
            // BlobSet and Blob IDs start from zero
            allNames[currSet][i] = InputBlobName.Create(jobName, currSet, i);

            var item = items[processedBlobs];
            _blobStorage.PutBlob(allNames[currSet][i].ContainerName, allNames[currSet][i].ToString(), item, mapIn, false, out ignored);
            processedBlobs++;
        }

        _queueStorage.Put(JobsQueueName, new JobMessage() { Type = MessageType.BlobSetToProcess, JobName = jobName, BlobSetId = currSet });
    }
}