/// <summary>
/// Liveness probe invoked by the current job tracker. Besides answering "alive",
/// it makes this node adopt the caller's replicated view of the cluster (tracker
/// list, worker list, pending/zombie queues, in-flight work) so that a backup
/// tracker holds consistent state if it is later promoted.
/// </summary>
/// <param name="zombieQueue">Splits abandoned by dead/slow workers, keyed by split id.</param>
/// <param name="jobTrackers">Known job-tracker URLs.</param>
/// <param name="workers">Known worker URLs.</param>
/// <param name="jobQueue">Snapshot of the pending split queue.</param>
/// <param name="onGoingWork">Splits currently being processed, keyed by worker URL.</param>
/// <param name="jobTrackerUrl">URL of the tracker performing the probe.</param>
/// <returns>Always true — reaching this method at all proves the node is alive.</returns>
public bool isAlive(ConcurrentDictionary<int, LibPADIMapNoReduce.FileSplit> zombieQueue, List<string> jobTrackers, List<string> workers, LibPADIMapNoReduce.FileSplit[] jobQueue, ConcurrentDictionary<string, LibPADIMapNoReduce.FileSplit> onGoingWork, string jobTrackerUrl)
{
    handleFreezeWorker(); // honour a pending FREEZEW from the PuppetMaster

    // The active tracker probing itself: nothing to replicate, just report alive.
    if (jobTrackerUrl == url)
    {
        return true;
    }

    // Adopt the caller's view of the cluster membership and in-flight work.
    this.jobTrackerUrl = jobTrackerUrl;
    this.jobTrackers = jobTrackers;
    this.workers = workers;
    this.onGoingWork = onGoingWork;

    // Rebuild local queue copies from the snapshots so later mutations on
    // either side stay local to that side.
    this.jobQueue = new ConcurrentQueue<LibPADIMapNoReduce.FileSplit>(jobQueue);
    this.zombieQueue = new ConcurrentDictionary<int, LibPADIMapNoReduce.FileSplit>(zombieQueue);

    return true;
}
/// <summary>
/// Processes one input split assigned by the job tracker: downloads the split's
/// bytes from the client (in one request, or in BATCH_REQUEST_SIZE chunks for
/// large splits), runs the map function over the decoded lines, then notifies
/// the job tracker that the split finished — retrying until a live tracker
/// (possibly a newly elected one) answers.
/// </summary>
/// <param name="fileSplits">Split descriptor: byte interval in the input file plus split id.</param>
public void work(LibPADIMapNoReduce.FileSplit fileSplits)
{
    handleFreezeWorker(); // For handling FREEZEW from PuppetMaster
    handleSlowMap();      // For handling SLOWW from PuppetMaster
    CURRENT_STATUS_WORKER = STATUS.WORKER_WORKING; // For STATUS command of PuppetMaster
    PERCENTAGE_FINISHED = 0;

    PADIMapNoReduce.Pair<long, long> byteInterval = fileSplits.pair;
    Console.WriteLine("Received job for bytes: " + byteInterval.First + " to " + byteInterval.Second);

    if (workerSetup)
    {
        long splitSize = byteInterval.Second - byteInterval.First;
        PADIMapNoReduce.IClient client = (PADIMapNoReduce.IClient)Activator.GetObject(typeof(PADIMapNoReduce.IClient), clientUrl);

        if (splitSize <= BATCH_REQUEST_SIZE) // small split: request everything at once
        {
            string[] splitLines = fetchLines(client, byteInterval);
            map(ref splitLines, fileSplits.splitId, true);
        }
        else // large split: request it in BATCH_REQUEST_SIZE chunks
        {
            for (long i = byteInterval.First; i < byteInterval.Second; i += BATCH_REQUEST_SIZE)
            {
                handleFreezeWorker(); // For handling FREEZEW from PuppetMaster
                handleSlowMap();

                // Clamp the last chunk to the split's end.
                long chunkEnd = (i + BATCH_REQUEST_SIZE > byteInterval.Second) ? byteInterval.Second : i + BATCH_REQUEST_SIZE;
                PADIMapNoReduce.Pair<long, long> miniByteInterval = new PADIMapNoReduce.Pair<long, long>(i, chunkEnd);

                string[] splitLines = fetchLines(client, miniByteInterval);
                if (!map(ref splitLines, fileSplits.splitId, false))
                {
                    return; // split was aborted/reassigned: stop without notifying the tracker
                }

                // Coarse byte-based progress: the exact number of processed
                // characters cannot be derived across different encodings.
                PERCENTAGE_FINISHED = (float)(i - byteInterval.First) / (float)(byteInterval.Second - byteInterval.First);
            }
            PERCENTAGE_FINISHED = 1;
        }
    }
    else
    {
        Console.WriteLine("Worker is not set");
    }

    PERCENTAGE_FINISHED = 1;                       // For STATUS command of PuppetMaster
    CURRENT_STATUS_WORKER = STATUS.WORKER_WAITING; // For STATUS command of PuppetMaster

    // If the job tracker is down when we try to report, keep retrying until the
    // jobTrackerUrl field is updated to point at the newly elected tracker.
    bool success = false;
    while (!success)
    {
        PADIMapNoReduce.IJobTracker jobTracker = (PADIMapNoReduce.IJobTracker)Activator.GetObject(typeof(PADIMapNoReduce.IJobTracker), jobTrackerUrl);
        try
        {
            jobTracker.notifySplitFinish(url, fileSplits);
            success = true;
        }
        catch (System.Net.Sockets.SocketException)
        {
            // FIX: the original loop busy-spun at full CPU while the tracker was
            // down. Back off briefly before retrying against the (possibly new)
            // tracker URL.
            System.Threading.Thread.Sleep(500);
        }
    }
}

/// <summary>
/// Requests the given byte interval of the input file from the client and
/// decodes it into non-empty UTF-8 lines, updating the PuppetMaster-visible
/// status around the transfer. Extracted to remove the duplicated
/// fetch/decode sequence the two branches of work() used to share.
/// </summary>
/// <param name="client">Remote client stub serving the input file.</param>
/// <param name="interval">Byte range [First, Second) to fetch.</param>
/// <returns>The interval's content split into non-empty lines.</returns>
private string[] fetchLines(PADIMapNoReduce.IClient client, PADIMapNoReduce.Pair<long, long> interval)
{
    CURRENT_STATUS_WORKER = STATUS.WORKER_TRANSFERING_INPUT;
    List<byte> splitBytes = client.processBytes(interval, filePath);
    CURRENT_STATUS_WORKER = STATUS.WORKER_WORKING;

    string[] lines = System.Text.Encoding.UTF8.GetString(splitBytes.ToArray()).Split(new string[] { Environment.NewLine }, System.StringSplitOptions.RemoveEmptyEntries);
    splitBytes.Clear(); // release the buffer promptly; splits can be large
    return lines;
}
/// <summary>
/// Job-tracker callback: a worker reports that it finished a split. Hands the
/// worker its next split from the pending queue; if none remain, re-dispatches
/// a zombie split (one abandoned by a dead/slow worker); when no work is left
/// in flight anywhere, tells the client the job has concluded.
/// </summary>
/// <param name="workerUrl">Remoting URL of the reporting worker.</param>
/// <param name="fileSplit">The split the worker just completed.</param>
public void notifySplitFinish(string workerUrl, LibPADIMapNoReduce.FileSplit fileSplit)
{
    handleFreezeJobTracker(); // For handling FREEZEC from PuppetMaster
    Console.WriteLine("Worker {0} finished split {1}", workerUrl, fileSplit.splitId);

    PADIMapNoReduce.IWorker worker = (PADIMapNoReduce.IWorker)Activator.GetObject(typeof(PADIMapNoReduce.IWorker), workerUrl);

    LibPADIMapNoReduce.FileSplit job = null;
    if (jobQueue.TryDequeue(out job))
    {
        // Normal path: record the new assignment and dispatch it.
        try
        {
            if (onGoingWork.ContainsKey(workerUrl)) // UPDATE
            {
                onGoingWork[workerUrl] = job;
            }
            else // ADD
            {
                onGoingWork.TryAdd(workerUrl, job);
            }
            worker.work(job);
        }
        catch (System.Net.Sockets.SocketException)
        {
            // The worker is probably down; it will be evicted when the job
            // tracker next checks which workers are alive.
        }
    }
    else
    {
        PADIMapNoReduce.IClient client = (PADIMapNoReduce.IClient)Activator.GetObject(typeof(PADIMapNoReduce.IClient), clientUrl);

        // No fresh jobs left: try to take over one split from the zombie queue.
        LibPADIMapNoReduce.FileSplit jobz = null;
        IEnumerator<int> it = zombieQueue.Keys.GetEnumerator(); // already generic — the old (IEnumerator<int>) cast was redundant
        bool hasFirst = it.MoveNext();
        if (hasFirst && zombieQueue.TryRemove(it.Current, out jobz))
        {
            client.removeFile(fileSplit.splitId);
            try
            {
                // Drop the zombie worker's stale entry from onGoingWork.
                // ConcurrentDictionary enumeration tolerates concurrent removal.
                foreach (string s in onGoingWork.Keys)
                {
                    LibPADIMapNoReduce.FileSplit job1 = null;
                    // FIX: the original ignored TryGetValue's result and then
                    // dereferenced job1 — a NullReferenceException if the entry
                    // had been removed concurrently.
                    if (onGoingWork.TryGetValue(s, out job1) && job1 != null && job1.splitId == jobz.splitId)
                    {
                        LibPADIMapNoReduce.FileSplit deletedJob = null;
                        onGoingWork.TryRemove(s, out deletedJob);
                        Console.WriteLine("removi a Key = {0}", s);
                    }
                }

                if (onGoingWork.ContainsKey(workerUrl)) // UPDATE
                {
                    onGoingWork[workerUrl] = jobz;
                }
                else // ADD
                {
                    onGoingWork.TryAdd(workerUrl, jobz);
                }
                worker.work(jobz);
            }
            catch (System.Net.Sockets.SocketException)
            {
                // The worker is probably down; it will be evicted when the job
                // tracker next checks which workers are alive.
            }
        }
        else
        {
            // Nothing pending at all: retire this worker's entry and, if it was
            // the last split in flight, report job completion to the client.
            LibPADIMapNoReduce.FileSplit job2 = null;
            onGoingWork.TryRemove(workerUrl, out job2);
            if (onGoingWork.Count == 0)
            {
                try
                {
                    client.jobConcluded();
                    System.Console.WriteLine("////////////JOB CONCLUDED/////////////////");
                    CURRENT_STATUS_JOBTRACKER = STATUS.JOBTRACKER_WAITING; // For STATUS command of PuppetMaster
                }
                catch (Exception e)
                {
                    System.Console.WriteLine("EXCEPTION: " + e.Message);
                    CURRENT_STATUS_JOBTRACKER = STATUS.JOBTRACKER_WAITING; // For STATUS command of PuppetMaster
                    return;
                }
            }
        }
    }
}