/* spiderFetch - ThreadPool QueueUserWorkItem method, gets the links on a page and adds them to the
 * candidates list.
 * @args - a _SpiderDataWrapper_spiderFetch object that will be cast back from a plain object. We
 *         have to take a plain object as input because ThreadPool.QueueUserWorkItem only passes a
 *         single object to its worker delegate (spiderFetch) as an argument.
 */
static void spiderFetch(Object args) {
    _SpiderDataWrapper_spiderFetch wrapper = (_SpiderDataWrapper_spiderFetch)args;
    Spider spider_obj = wrapper.getSpiderObject();
    SpiderPage current_page = wrapper.getNewPage();

    // check this thread into _thread_status, a list of int[]s, where [0] is the thread ID and [1] is
    // the status: 0 for not working and 1 for working. thread_index is used later to change this
    // thread id's status back to not working when it's done
    int thread_index = 0;
    bool thread_found = false;
    for (int i = 0; i < spider_obj._thread_status.Count; i++) {
        if (spider_obj._thread_status.ElementAt(i)[0] == Thread.CurrentThread.ManagedThreadId) {
            spider_obj._thread_status.ElementAt(i)[1] = 1;
            thread_index = i;
            thread_found = true;
            break;
        }
    }
    // need to make a new entry for this thread id in _thread_status...
    if (!thread_found) {
        // lock on the shared _thread_status list while adding to it; the lock object has to be
        // shared between threads to provide any mutual exclusion (a new local object would not)
        lock (spider_obj._thread_status) {
            spider_obj._thread_status.Add(new int[] { Thread.CurrentThread.ManagedThreadId, 1 });
            thread_index = spider_obj._thread_status.Count - 1;
        }
    }

    spider_obj.writeStatus("thread id: " + Thread.CurrentThread.ManagedThreadId +
        ", spiderFetch(): fetching " + current_page.getUrl());

    _SpiderDataWrapper_getLinks gl_wrapper = getLinks(current_page, spider_obj);
    string current_page_final_url = gl_wrapper.getFinalUrl();
    List<SpiderPage> current_page_links = gl_wrapper.getNewLinks();

    // add each linked-to page as a candidate, and remember its URL for this page's linking-to list
    List<string> current_page_link_strings = new List<string>();
    for (int q = 0; q < current_page_links.Count; q++) {
        SpiderPage qth_page = current_page_links.ElementAt(q);
        spider_obj._candidate_pages.Add(qth_page);
        current_page_link_strings.Add(qth_page.getUrl());
    }
    // add the fetched page itself as a candidate, now carrying its post-redirect final URL
    spider_obj._candidate_pages.Add(new SpiderPage(current_page.getUrl(), current_page_final_url,
        current_page.getReferencedByUrls(), current_page_link_strings));

    // set this thread id's status back to not working in _thread_status
    spider_obj._thread_status.ElementAt(thread_index)[1] = 0;
}
public _SpiderDataWrapper_spiderFetch(Spider spider_obj, SpiderPage new_page) {
    this.spider_obj = spider_obj;
    this.new_page = new_page;
}
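spiderFetch() pulls the Spider and the SpiderPage back out of this wrapper via getSpiderObject() and getNewPage(). Only the constructor appears above, so here is a minimal sketch of the rest of the class, assuming it is nothing more than a two-field holder used to squeeze both arguments through QueueUserWorkItem's single object parameter:

// Hypothetical completion (assumed, not shown in the original source)
class _SpiderDataWrapper_spiderFetch {
    private Spider spider_obj;
    private SpiderPage new_page;

    public _SpiderDataWrapper_spiderFetch(Spider spider_obj, SpiderPage new_page) {
        this.spider_obj = spider_obj;
        this.new_page = new_page;
    }

    public Spider getSpiderObject() { return this.spider_obj; }
    public SpiderPage getNewPage() { return this.new_page; }
}

It gets handed to the thread pool the way spiderProcess() does below: ThreadPool.QueueUserWorkItem(new WaitCallback(spiderFetch), new _SpiderDataWrapper_spiderFetch(this, current_candidate_page)).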
/* getLinks() - find all the links on a given page
 * @startp - the page to be scanned for links, represented as a SpiderPage object (which has a
 *           referring page)
 * @s - the Spider object in use
 */
static _SpiderDataWrapper_getLinks getLinks(SpiderPage startp, Spider s) {
    List<string> pre_pages = new List<string>();
    string final_url = "";
    List<SpiderPage> new_pages = new List<SpiderPage>();
    StringBuilder sb = new StringBuilder();
    byte[] buf = new byte[8192];

    HttpWebRequest req = (HttpWebRequest)WebRequest.Create(startp.getUrl());
    //req.Timeout = 1000;
    HttpWebResponse resp = null;
    try {
        resp = (HttpWebResponse)req.GetResponse();
    }
    catch (Exception e) {
        s.writeStatus("ERROR: " + e.Message);
        s.writeStatus("\tpage - " + startp.getUrl() + "\n\t\treferred to by:");
        List<string> curr_refs = startp.getReferencedByUrls();
        for (int i = 0; i < curr_refs.Count; i++) {
            s.writeStatus("\t\t\t" + curr_refs.ElementAt(i));
        }
    }

    if (resp != null) {
        // record the final URL after any redirects from this link
        final_url = resp.ResponseUri.AbsoluteUri;

        // read the response body into the StringBuilder
        Stream resp_stream = resp.GetResponseStream();
        string temp_string = null;
        int count = 0;
        do {
            count = resp_stream.Read(buf, 0, buf.Length);
            if (count != 0) {
                temp_string = Encoding.ASCII.GetString(buf, 0, count);
                sb.Append(temp_string);
            }
        } while (count > 0);
        resp.Close();

        // parse the page and pull out the href of every <a> tag that has one and isn't empty
        HtmlDocument doc = new HtmlDocument();
        doc.LoadHtml(sb.ToString());
        var linksOnPage = from lnks in doc.DocumentNode.Descendants()
                          where lnks.Name == "a" &&
                                lnks.Attributes["href"] != null &&
                                lnks.InnerText.Trim().Length > 0
                          select new {
                              Url = lnks.Attributes["href"].Value
                          };

        // keep only site-relative links, normalized to end with a trailing slash
        foreach (var link in linksOnPage) {
            if (link.Url.StartsWith("/")) {
                if (link.Url.EndsWith("/")) {
                    pre_pages.Add(s.getBaseUrl() + link.Url);
                }
                else {
                    pre_pages.Add(s.getBaseUrl() + link.Url + "/");
                }
            }
        }

        // de-duplicate, then turn each distinct URL into a SpiderPage referred to by startp
        List<string> distinct_pre_pages = pre_pages.Distinct().ToList();
        for (int m = 0; m < distinct_pre_pages.Count; m++) {
            new_pages.Add(new SpiderPage(distinct_pre_pages.ElementAt(m), startp.getUrl()));
        }
    }
    return new _SpiderDataWrapper_getLinks(final_url, new_pages);
}
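_SpiderDataWrapper_getLinks plays the same role on the way out: getLinks() produces two results (the post-redirect final URL and the list of new pages) but has one return value. A minimal sketch, assuming a plain holder that matches the getFinalUrl()/getNewLinks() calls made in spiderFetch():

// Hypothetical sketch (assumed, not shown in the original source)
class _SpiderDataWrapper_getLinks {
    private string final_url;
    private List<SpiderPage> new_links;

    public _SpiderDataWrapper_getLinks(string final_url, List<SpiderPage> new_links) {
        this.final_url = final_url;
        this.new_links = new_links;
    }

    public string getFinalUrl() { return this.final_url; }
    public List<SpiderPage> getNewLinks() { return this.new_links; }
}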
/* spiderProcess() - (older version) processes the current batch of candidate pages into
 * masterResults, queues spiderFetch() workers for new pages, then sleeps and recurses until
 * there are no candidate pages left.
 */
void spiderProcess() {
    this._thread_status.ElementAt(0)[1] = 1;
    lock (this) {
        int candidate_pages_count = this._candidate_pages.Count;
        SpiderPage current_candidate_page = null;
        SpiderPage current_master_page = null;
        for (int i = 0; i < candidate_pages_count; i++) {
            bool found = false;
            current_candidate_page = this._candidate_pages.ElementAt(i);
            if (current_candidate_page.finalUrlNeeded()) {
                // this candidate is a bare link that hasn't been fetched yet: if its URL is already
                // the final URL of a page in masterResults, just merge its referring URLs
                for (int m = 0; m < this.masterResults.Count; m++) {
                    current_master_page = this.masterResults.ElementAt(m);
                    if (!current_master_page.finalUrlNeeded() &&
                        current_candidate_page.getUrl() == current_master_page.getFinalUrl()) {
                        found = true;
                        List<string> current_candidate_page_ref_urls = current_candidate_page.getReferencedByUrls();
                        for (int g = 0; g < current_candidate_page_ref_urls.Count; g++) {
                            if (!current_master_page.getReferencedByUrls().Contains(current_candidate_page_ref_urls.ElementAt(g))) {
                                current_master_page.addReferencedByUrl(current_candidate_page_ref_urls.ElementAt(g));
                            }
                        }
                        break;
                    }
                }
            }
            else {
                // this candidate has been fetched and has a final URL: merge it into the
                // masterResults entry that resolves to the same final URL
                for (int j = 0; j < this.masterResults.Count; j++) {
                    if (current_candidate_page.getFinalUrl() == this.masterResults.ElementAt(j).getFinalUrl()) {
                        found = true;
                        // add an alias entry in the masterResults if we were redirected here
                        if (current_candidate_page.getUrl() != current_candidate_page.getFinalUrl()) {
                            this.masterResults.ElementAt(j).addAliasUrl(current_candidate_page.getUrl());
                        }
                        // add all the linking URLs from the candidate page to masterResults
                        List<string> current_page_link_urls = current_candidate_page.getLinkingToUrls();
                        for (int q = 0; q < current_page_link_urls.Count; q++) {
                            if (!this.masterResults.ElementAt(j).getLinkingToUrls().Contains(current_page_link_urls.ElementAt(q))) {
                                this.masterResults.ElementAt(j).addLinkingToUrl(current_page_link_urls.ElementAt(q));
                            }
                        }
                        // add all the referring URLs from the candidate page to masterResults
                        List<string> current_page_ref_urls = current_candidate_page.getReferencedByUrls();
                        for (int g = 0; g < current_page_ref_urls.Count; g++) {
                            if (!this.masterResults.ElementAt(j).getReferencedByUrls().Contains(current_page_ref_urls.ElementAt(g))) {
                                this.masterResults.ElementAt(j).addReferencedByUrl(current_page_ref_urls.ElementAt(g));
                            }
                        }
                        break;
                    }
                }
            }
            // if this is a new page, add it to the master results and queue a fetch for it
            if (!found) {
                this.masterResults.Add(current_candidate_page);
                ThreadPool.QueueUserWorkItem(new WaitCallback(spiderFetch),
                    new _SpiderDataWrapper_spiderFetch(this, current_candidate_page));
            }
        }
        // remove all the candidate pages we've just processed
        _candidate_pages.RemoveRange(0, candidate_pages_count);
    }
    Thread.Sleep(30000);
    if (this._candidate_pages.Count > 0) {
        this.spiderProcess();
    }
    this._thread_status.ElementAt(0)[1] = 0;
}
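The old code above leans on a SpiderPage class that isn't shown. Here is a guessed-at skeleton, inferred only from the calls made in getLinks(), spiderFetch() and spiderProcess(); the field names and the finalUrlNeeded() test are assumptions:

// Hypothetical skeleton of the old SpiderPage (assumed, not shown in the original source)
class SpiderPage {
    private string url;
    private string final_url;                    // empty until the page has actually been fetched
    private List<string> referenced_by_urls = new List<string>();
    private List<string> linking_to_urls = new List<string>();
    private List<string> alias_urls = new List<string>();

    // constructor used in getLinks(): a link target we haven't fetched yet
    public SpiderPage(string url, string referring_url) {
        this.url = url;
        this.final_url = "";
        this.referenced_by_urls.Add(referring_url);
    }

    // constructor used in spiderFetch(): a fetched page with its post-redirect final URL and links
    public SpiderPage(string url, string final_url, List<string> referenced_by_urls, List<string> linking_to_urls) {
        this.url = url;
        this.final_url = final_url;
        this.referenced_by_urls = referenced_by_urls;
        this.linking_to_urls = linking_to_urls;
    }

    public string getUrl() { return this.url; }
    public string getFinalUrl() { return this.final_url; }
    public bool finalUrlNeeded() { return this.final_url == ""; }
    public List<string> getReferencedByUrls() { return this.referenced_by_urls; }
    public List<string> getLinkingToUrls() { return this.linking_to_urls; }
    public void addReferencedByUrl(string url) { this.referenced_by_urls.Add(url); }
    public void addLinkingToUrl(string url) { this.linking_to_urls.Add(url); }
    public void addAliasUrl(string url) { this.alias_urls.Add(url); }
}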
/* _candidate_makeNewSpiderPage - makes a new SpiderPage out of this candidate object */
public SpiderPage _candidate_makeNewSpiderPage() {
    SpiderPage s = new SpiderPage(this.getUrl(), this.getLinkingToLinks(), this.getReferredByLinks(),
        this.getPageContent());
    if (this._candidate_isAliasCandidate()) {
        s.addAliasUrl(this._candidate_getUrl());
    }
    return s;
}
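That method lives in a _SpiderPageCandidate class whose other members only show up through calls in spiderProcess() below. A hypothetical skeleton of those candidate-specific members: the inheritance from SpiderPage, the base(url) constructor, and the state fields are all guesses, not taken from the source.

// Hypothetical skeleton (assumed, not shown in the original source)
class _SpiderPageCandidate : SpiderPage {           // inheritance is a guess
    private string candidate_url;   // the URL the originating link pointed at (may turn out to be an alias)
    private bool done = false;      // set once fetchPage() has crawled this candidate
    private bool error = false;     // set if the fetch failed (404, timeout, etc.)

    public _SpiderPageCandidate(SpiderLink link) : base(link.getNormalizedUrl()) {
        this.candidate_url = link.getNormalizedUrl();
        this.addReferredByLink(link);
    }

    public string _candidate_getUrl() { return this.candidate_url; }
    public bool _candidate_isDone() { return this.done; }
    public bool _candidate_isError() { return this.error; }
    // an alias candidate is one whose fetch was redirected to a different final URL
    public bool _candidate_isAliasCandidate() { return this.candidate_url != this.getUrl(); }
}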
/* addNewPage() - adds a new page to the _master_results, i.e. a new officially vetted page
 * @new_page - the SpiderPage object to add
 */
void addNewPage(SpiderPage new_page) {
    this._master_results.Add(new_page);
}
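spiderProcess() below also calls a few lookup helpers on the Spider object (findPageIndex(), getPageAtIndex(), getLastPageIndex(), getCandidatePageAtIndex()) that aren't shown here. Minimal sketches, assuming they are plain linear searches and index lookups over _master_results and _candidate_pages:

// Hypothetical sketches (assumed, not shown in the original source)
int findPageIndex(string url) {
    // index of the master page with this URL, or -1 if we haven't recorded it yet
    for (int i = 0; i < this._master_results.Count; i++) {
        if (this._master_results.ElementAt(i).getUrl() == url) {
            return i;
        }
    }
    return -1;
}

SpiderPage getPageAtIndex(int index) {
    return this._master_results.ElementAt(index);
}

int getLastPageIndex() {
    // index of the page most recently added by addNewPage()
    return this._master_results.Count - 1;
}

_SpiderPageCandidate getCandidatePageAtIndex(int index) {
    return this._candidate_pages.ElementAt(index);
}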
/* spiderProcess() - master spider process:
 *
 * PART 1: process the candidate pages that the fetchPage() threads crawled after PART 2 of the
 *         last round, and generate a list of new links for PART 2 (of this round)
 * PART 2: make new fetchPage() threads to crawl the new candidate pages found in the links
 *         from PART 1
 */
static void spiderProcess(object o) {
    // cast our argument back to a Spider object
    Spider spider_object = (Spider)o;

    // loop spiderProcess() until we're done processing candidate pages
    do {
        // wait for all the worker threads to be done before starting each round of spiderProcess()
        bool ready = false;
        do {
            ready = spider_object.checkWorkerThreads();
        } while (!ready);

        // all of this depends on _master_results and _candidate_pages, so keep the spider object locked
        lock (spider_object) {
            // PART 1: process the candidate pages that were crawled by the worker threads created in
            // the last round of spiderProcess()

            // list of all the links found in the candidate pages we process
            List<SpiderLink> new_links_found = new List<SpiderLink>();
            // list of all the candidate page URLs that we add to the master results this round
            List<string[]> added_candidate_urls = new List<string[]>();

            int candidate_page_count = spider_object._candidate_pages.Count;
            // iterative for-loop, don't see a better way to do this really (or why we'd want one)...
            for (int i = 0; i < candidate_page_count; i++) {
                bool found = false;
                _SpiderPageCandidate current_candidate_page = spider_object.getCandidatePageAtIndex(i);

                // make sure this candidate page was crawled by fetchPage(); should be true for every
                // candidate page that didn't return a 404 or some error, etc.
                if (current_candidate_page._candidate_isDone()) {
                    // see if this candidate page went to the same final URL as a page that we've
                    // already added in this round of spiderProcess()
                    int already_added_candidate_index = added_candidate_urls.FindIndex(delegate(string[] s) {
                        return (s[0] == current_candidate_page.getUrl());
                    });

                    // two tests of whether this candidate page *could* already be in the master results:
                    // 1) this page's final URL is in the already-added list (then it's certainly in the
                    //    master results), or 2) it was an alias candidate (i.e. a redirect to a different
                    //    final URL); otherwise we're guaranteed that this candidate page is a new page,
                    //    and therefore not already in the master results, and all of this will be skipped
                    if (already_added_candidate_index > -1 || current_candidate_page._candidate_isAliasCandidate()) {
                        int real_page_index = -1;
                        if (already_added_candidate_index > -1) {
                            real_page_index = Int32.Parse(added_candidate_urls.ElementAt(already_added_candidate_index)[1]);
                        }
                        else {
                            real_page_index = spider_object.findPageIndex(current_candidate_page.getUrl());
                        }

                        // was it an existing page after all? if so, add any referring links that have been
                        // added to this candidate page (i.e. links to its alias address that were found in
                        // PART 2 of spiderProcess() last time), and add this alias URL to the existing
                        // page's list of alias URLs (if it was an alias; it's also possible that the link
                        // that generated this candidate page was found after a link that went to an alias
                        // of this page, in which case this one would not be an alias)
                        if (real_page_index > -1) {
                            found = true;
                            SpiderPage real_page = spider_object.getPageAtIndex(real_page_index);
                            List<SpiderLink> current_candidate_referred_links = current_candidate_page.getReferredByLinks();
                            // another iterative for-loop, doesn't need to be improved really afaik?
                            for (int k = 0; k < current_candidate_referred_links.Count; k++) {
                                real_page.addReferredByLink(current_candidate_referred_links.ElementAt(k));
                            }
                            if (current_candidate_page._candidate_isAliasCandidate()) {
                                real_page.addAliasUrl(current_candidate_page._candidate_getUrl());
                            }
                        }
                    }

                    // this candidate page was a real new page: add it to the master results, add its
                    // links to the new links found this round, and add it to the list of pages added
                    // this round
                    if (!found) {
                        SpiderPage new_page = current_candidate_page._candidate_makeNewSpiderPage();
                        new_links_found.AddRange(new_page.getLinkingToLinks());
                        spider_object.addNewPage(new_page);
                        added_candidate_urls.Add(new string[] { new_page.getUrl(),
                            spider_object.getLastPageIndex().ToString() });
                    }

                    // this candidate page is done being processed: remove it from the list
                    spider_object._candidate_pages.RemoveAt(i);
                    candidate_page_count--;
                    i--;
                }
            }

            // PART 2: make new candidate pages from the new links that go to pages we haven't seen
            // before, and create new fetchPage() worker threads to crawl them
            List<_SpiderPageCandidate> new_candidate_pages = new List<_SpiderPageCandidate>();
            for (int j = 0; j < new_links_found.Count; j++) {
                SpiderLink current_link = new_links_found.ElementAt(j);
                if (current_link.isLegalLink()) {
                    // see if we've made a new candidate page for this link already
                    int link_index = -1;
                    // for-loop being used for search, DEFINITELY can be improved with some better
                    // data structure etc.
                    for (int y = 0; y < new_candidate_pages.Count; y++) {
                        if (new_candidate_pages.ElementAt(y)._candidate_getUrl() == current_link.getNormalizedUrl()) {
                            link_index = y;
                            break;
                        }
                    }
                    // if we have made a new candidate page already, just add a referred-by link to the
                    // candidate page we already made
                    if (link_index > -1) {
                        new_candidate_pages.ElementAt(link_index).addReferredByLink(current_link);
                    }
                    // otherwise, search the master results to see if we need to create a new candidate
                    // page or not
                    else {
                        int real_page_index = spider_object.findPageIndex(current_link.getNormalizedUrl());
                        // if this link's URL exists in the master results already, just add a referred-by link
                        if (real_page_index > -1) {
                            SpiderPage real_page = spider_object.getPageAtIndex(real_page_index);
                            real_page.addReferredByLink(current_link);
                        }
                        // otherwise, make a new candidate page from this link
                        else {
                            new_candidate_pages.Add(new _SpiderPageCandidate(current_link));
                        }
                    }
                }
            }

            // create a new fetchPage() worker thread for every new candidate page we made
            // iterative for-loop, seems fine...
            for (int p = 0; p < new_candidate_pages.Count; p++) {
                spider_object._candidate_pages.Add(new_candidate_pages.ElementAt(p));
                spider_object.addThreadStatus();
                ThreadPool.QueueUserWorkItem(new WaitCallback(fetchPage),
                    new _SpiderWorkItemDataWrapper(spider_object, spider_object._candidate_pages.Count - 1));
            }
        }
    }
    // loop spiderProcess() until there are either no candidate pages in the list or only error
    // candidate pages are left
    while (spider_object._candidate_pages.Count > 0 &&
           spider_object._candidate_pages.Any(delegate(_SpiderPageCandidate spc) {
               return (!spc._candidate_isError());
           }));

    // we're done spidering now, clear our _thread_status entry (the 0-index in _thread_status is
    // reserved for spiderProcess(), worker threads are indices > 0)
    spider_object._thread_status.RemoveAt(0);
}
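Two more Spider helpers are referenced but not shown: checkWorkerThreads() and addThreadStatus(). Minimal sketches, assuming the _thread_status conventions described earlier (index 0 reserved for spiderProcess(), each entry an int[] of { thread id, status }, with status 1 meaning working):

// Hypothetical sketches (assumed, not shown in the original source)
bool checkWorkerThreads() {
    // ready when every worker slot (indices > 0; index 0 is spiderProcess() itself) reports
    // status 0, i.e. not working
    for (int i = 1; i < this._thread_status.Count; i++) {
        if (this._thread_status.ElementAt(i)[1] == 1) {
            return false;
        }
    }
    return true;
}

void addThreadStatus() {
    // reserve a slot for the worker we're about to queue, marked as working so checkWorkerThreads()
    // can't report ready before the worker has even started; the real thread id is assumed to be
    // filled in when fetchPage() checks itself in
    lock (this._thread_status) {
        this._thread_status.Add(new int[] { 0, 1 });
    }
}

Presumably the whole crawl is then kicked off by seeding a first candidate page and queueing spiderProcess() itself with the Spider object as its single argument, in the same QueueUserWorkItem style used for the worker threads.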