Code example #1
        // TODO - allow "*" wildcard for all sites, implement logging if package name doesn't exist or if package mapping file specifies files that aren't added?
        /// <summary>
        /// Builds a prioritized list of extractor configurations: one clone per (file in package, site)
        /// pair, then hands the collection to the orchestrator via prioritizeConfigs(IList).
        /// </summary>
        /// <param name="sites">Semicolon-delimited site codes (e.g. "500;501")</param>
        /// <param name="packageName">Package whose mapped files should be prioritized</param>
        public void prioritizeConfigs(String sites, String packageName)
        {
            PackageTranslator pt        = new PackageTranslator();
            IList <String>    fileNames = pt.getFilesInPackage(packageName);

            // RemoveEmptyEntries guards against trailing/doubled semicolons ("500;" or "500;;501"),
            // which previously produced clones carrying an empty SiteCode
            IList <String> siteCodes = sites.Split(new char[] { ';' }, StringSplitOptions.RemoveEmptyEntries).ToList();

            ISqlDao sqlDao = new SqlDaoFactory().getSqlDao(new SqlConnectionFactory().getConnection(
                                                               ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlProvider],
                                                               ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlConnectionString]));
            IList <ExtractorConfiguration> configsFromDb = sqlDao.getActiveExtractorConfigurations();

            IList <ExtractorConfiguration> configsToPrioritize = new List <ExtractorConfiguration>();

            // outer loop order preserved: output ordering follows the package's file order
            foreach (String fileName in fileNames)
            {
                foreach (ExtractorConfiguration ec in configsFromDb)
                {
                    if (String.Equals(fileName, ec.QueryConfigurations.RootNode.Value.File))
                    {
                        foreach (String site in siteCodes)
                        {
                            ExtractorConfiguration newConfig = ec.Clone(); // need to clone because we're giving each config a unique site code!
                            newConfig.SiteCode = site;
                            configsToPrioritize.Add(newConfig);
                        }
                    }
                }
            }

            // finally ready to send to orchestrator!
            prioritizeConfigs(configsToPrioritize);
        }
Code example #2
        /// <summary>
        /// Fetches the ETL stage records for the given downstream batch and wraps them
        /// in their transfer-object array form.
        /// </summary>
        /// <param name="downstreamBatchId">Downstream batch identifier to look up</param>
        public EtlDownstreamStageArray getEtlStagesBySite(String downstreamBatchId)
        {
            // resolve provider/connection settings from app config and build a DAO for this call
            String provider  = ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlProvider];
            String cxnString = ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlConnectionString];

            ISqlDao sqlDao = new SqlDaoFactory().getSqlDao(new SqlConnectionFactory().getConnection(provider, cxnString));

            return new EtlDownstreamStageArray(sqlDao.getEtlStagesForDownstreamBatch(downstreamBatchId));
        }
Code example #3
        /// <summary>
        /// Looks up the extractor report for a site/file/batch combination and wraps it
        /// in its transfer-object form.
        /// </summary>
        /// <param name="siteId">Site the extraction ran against</param>
        /// <param name="vistaFile">VistA file that was extracted</param>
        /// <param name="batchId">Extraction batch identifier</param>
        public ReportTO getExtractorReport(String siteId, String vistaFile, String batchId)
        {
            String provider  = ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlProvider];
            String cxnString = ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlConnectionString];

            ISqlDao sqlDao = new SqlDaoFactory().getSqlDao(new SqlConnectionFactory().getConnection(provider, cxnString));

            return new ReportTO(sqlDao.getExtractorReport(siteId, vistaFile, batchId));
        }
Code example #4
        /// <summary>
        /// Builds a LastRunInfoResponse message whose payload is the JSON-serialized
        /// (batch id, completion time) pair from the database.
        /// </summary>
        public MessageTO getLastRunCompletedTimeAndBatchId()
        {
            ISqlDao sqlDao = new SqlDaoFactory().getSqlDao(
                new SqlConnectionFactory().getConnection(
                    ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlProvider],
                    ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlConnectionString]));

            MessageTO response = new MessageTO();
            response.MessageType = MessageTypeTO.LastRunInfoResponse;
            // serialize the pair so the client can deserialize it symmetrically
            response.Message = gov.va.medora.utils.JsonUtils.Serialize <KeyValuePair <String, DateTime> >(sqlDao.getLastRunCompletedTimeAndBatchId());
            return response;
        }
Code example #5
        /// <summary>
        /// Check to see if any post processing needs to happen for this file. If so, do it.
        /// Runs on a background thread (see ParameterizedThreadStart usage in the job-complete handler).
        /// </summary>
        /// <param name="obj">The last request we serviced (a boxed MessageTO)</param>
        private void postProcess(object obj)
        {
            MessageTO request = (MessageTO)obj;

            // Protect - also guard Configuration itself: a null Configuration previously threw a
            // NullReferenceException here, outside the try/catch below, killing the worker thread
            if (request == null || request.Configuration == null ||
                String.IsNullOrEmpty(request.Configuration.SiteCode) ||
                String.IsNullOrEmpty(request.Configuration.QueryConfigurations.RootNode.Value.File))
            {
                return;
            }

            // Do our work
            try
            {
                String  sqlProvider      = ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlProvider];
                String  connectionString = ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlConnectionString];
                ISqlDao sqlDao           = new SqlDaoFactory().getSqlDao(new SqlConnectionFactory().getConnection(sqlProvider, connectionString));

                String vistaFile = request.Configuration.QueryConfigurations.RootNode.Value.File;

                // Only run post processing once NO queued or active extractions remain for this file.
                // (Previously written as an empty if-branch with the work in the else.)
                if (!ActiveExtractions.ExistsByFile(vistaFile) && !WorkStack.ExistsByFile(vistaFile))
                {
                    sqlDao.postProcess(request.Configuration.QueryConfigurations.RootNode);
                }
            }
            catch
            {
                // deliberate best-effort swallow: this runs on a background thread with no
                // thread-safe logger available to it
                // TODO: REQ HANDLER NEEDS A LOGGING MECHANISM THAT IS THREAD-SAFE
            }
        }
Code example #6
        /// <summary>
        /// Loads the ETL downstream map into the data grid, optionally filtered down to the
        /// highest-numbered batch per site, and caches the bound items in session state.
        /// </summary>
        void bindDataGrid()
        {
            try
            {
                ISqlDao sqlDao = new SqlDaoFactory().getSqlDao(new SqlConnectionFactory().getConnection(_sqlProvider, _sqlConnectionString));
                IList <EtlDownstreamMapItem> items = sqlDao.getEtlDownstreamMap();

                if (checkboxShowLastBatchOnly.Checked)
                {
                    // keep only the row with the highest numeric EtlBatchId for each site
                    Dictionary <String, EtlDownstreamMapItem> latestBySite = new Dictionary <string, EtlDownstreamMapItem>();
                    foreach (EtlDownstreamMapItem current in items)
                    {
                        EtlDownstreamMapItem previous;
                        if (!latestBySite.TryGetValue(current.SiteId, out previous))
                        {
                            latestBySite.Add(current.SiteId, current);
                        }
                        else if (Convert.ToInt32(current.EtlBatchId) > Convert.ToInt32(previous.EtlBatchId))
                        {
                            latestBySite[current.SiteId] = current; // newer batch wins
                        }
                    }

                    items = latestBySite.Values.ToList();
                }

                dataGridEtlDownstreamMap.DataSource = items;
                dataGridEtlDownstreamMap.DataBind();

                // cached so later postbacks can reuse the bound collection
                Session["MapItems"] = items;
            }
            catch (Exception exc)
            {
                labelMessage.Text = "There was a problem fetching the map table from the database: " + exc.Message;
            }
        }
Code example #7
        /// <summary>
        /// Finds the active extractor configuration whose root query file matches the given
        /// VistA file, or null if no active configuration matches.
        /// </summary>
        /// <param name="vistaFile">VistA file number/name to match against the root query node</param>
        static ExtractorConfiguration getExtractorConfigFromDb(String vistaFile)
        {
            ISqlDao sqlDao = new SqlDaoFactory().getSqlDao(new SqlConnectionFactory().getConnection(
                                                               ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlProvider],
                                                               ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlConnectionString]));

            // return the first active configuration matching the requested file
            foreach (ExtractorConfiguration candidate in sqlDao.getActiveExtractorConfigurations())
            {
                if (String.Equals(candidate.QueryConfigurations.RootNode.Value.File, vistaFile))
                {
                    return candidate;
                }
            }

            return null;
        }
Code example #8
        /// <summary>
        /// If no work remains for a site (nothing queued, nothing active), either logs an
        /// exceptional record when the batch had errors, or creates the downstream trigger
        /// (marker) file for the site's batch directory.
        /// </summary>
        /// <param name="extractorBatchId">Batch being checked for completion</param>
        /// <param name="sitecode">Site whose work may be finished</param>
        /// <param name="vistaFile">File associated with the job that prompted this check</param>
        internal void checkSiteCompleteAndTrigger(String extractorBatchId, String sitecode, String vistaFile)
        {
            // anything still queued or actively running for this site means it isn't finished yet
            if (WorkStack.ContainsBySite(sitecode) || ActiveExtractions.ContainsBySite(sitecode))
            {
                return;
            }

            // TODO!!!! NEED TO CHECK AND MAKE SURE ALL JOBS FINISHED SUCCESSFULLY FOR THIS SITE AND BATCH AND ROLL BACK LAST IENS WRITTEN TO MDO IEN TRACKING AND ALSO NOT CREATE TRIGGER FILE
            String provider  = ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlProvider];
            String cxnString = ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlConnectionString];

            ISqlDao sqlDao = new SqlDaoFactory().getSqlDao(new SqlConnectionFactory().getConnection(provider, cxnString));

            // TODO - need to make error handling a bit more sophisticated so it can deal with non-critical file failures in certain modes and similar non-failure type events
            if (sqlDao.hasBatchAndSiteErrors(extractorBatchId, sitecode))
            {
                domain.reporting.Exceptional noTrigger = new domain.reporting.Exceptional();
                noTrigger.Code    = domain.reporting.ErrorCode.TRIGGER_NOT_CREATED;
                noTrigger.Message = "Trigger not created due to error for site/batch";
                sqlDao.saveExceptional(noTrigger, extractorBatchId, sitecode, vistaFile);
                logging.Log.LOG("Found error for site and didn't create batch file - logged event to database");
                return;
            }

            // woot! site is finished - we can go ahead and trigger
            logging.Log.LOG("Site complete!!! Creating trigger for " + sitecode);
            IFileDao fileDao = new FileDaoFactory().getFileDao();
            fileDao.setExtractsDirectory(sitecode, this.BatchDirectory);
            fileDao.createMarkerFile();
        }
Code example #9
        /// <summary>
        ///  For new job requests, do the following:
        ///      1. Make sure we can call client back on specified hostname
        ///      2. Pop a job off the work stack
        ///      3. Try locking the site/file from the popped job (put job back on stack if fail)
        ///      4. Send response back to client
        /// </summary>
        /// <param name="request">Client request carrying HostName and ListeningPort for the callback check</param>
        /// <returns>NewJobResponse with the locked configuration, or an Error/"no jobs" message</returns>
        public MessageTO getNewJobResponse(MessageTO request)
        {
            MessageTO response = new MessageTO();

            response.MessageType = MessageTypeTO.NewJobResponse;

            // "SansCallback" + port 0 is a sentinel used by tests to skip the callback verification
            if (String.Equals(request.HostName, "SansCallback") && request.ListeningPort == 0)
            {
                // this is from a test - don't require a callback
            }
            else
            {
                try
                {
                    // verify we can reach the client before handing it a job - otherwise the job
                    // would be locked to a client we can never talk to
                    //LOG.Debug("Received a new job response - first checking to make sure we can communicate with the client");
                    Client c = new Client();
                    c.connect(request.HostName, request.ListeningPort);
                    c.disconnect();
                    //LOG.Debug("Successfully connected to the client! Going to try sending the client a job now...");
                }
                catch (Exception exc)
                {
                    response.MessageType = MessageTypeTO.Error;
                    response.Error       = exc.ToString();
                    //LOG.Debug("Couldn't call back client who requested new job!", exc);
                    return(response);
                }
            }

            if (this.WorkStack == null)
            {
                //LOG.Debug("The work stack is null! Has the application been initialized properly?");
                response.MessageType = MessageTypeTO.Error;
                response.Error       = "The WorkStack has not been initialized!";
                return(response);
            }
            // I think we were opening up a race condition here - now we just try and pop a job and see if we got one
            // instead of making a call for the count and then another to pop a job
            //if (WorkStack.Count() == 0)
            //{
            //    LOG.Debug("Looks like there are no more jobs on the work stack! Tell the client thanks but nothing to do");
            //    response.Message = "No more jobs on the stack!";
            //    return response;
            //}

            //LOG.Debug("Found a job for the client - popping it off the stack");

            // Don't process new jobs if we are currently locked
            // NOTE(review): clients receive the same "no more jobs" message whether the stack is
            // empty or the handler is locked - presumably intentional so clients just retry later
            if (_locked)
            {
                response.Message = "No more jobs on the stack!";
                return(response);
            }

            //ExtractorConfiguration responseConfig = WorkStack.PopSiteUnique(ActiveExtractions);
            ExtractorConfiguration responseConfig = WorkStack.Pop();

            // a null pop means the stack was empty - single call avoids the count-then-pop race above
            if (responseConfig == null)
            {
                //LOG.Debug("Looks like there are no more jobs on the work stack! Tell the client thanks but nothing to do");
                response.Message = "No more jobs on the stack!";
                return(response);
            }
            String  sqlProvider      = ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlProvider];
            String  connectionString = ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlConnectionString];
            ISqlDao sqlDao           = new SqlDaoFactory().getSqlDao(new SqlConnectionFactory().getConnection(sqlProvider, connectionString));

            try
            {
                // lockSite returning false (lock already held) is handled differently from a thrown
                // exception: on false the job is NOT pushed back; on exception it is. NOTE(review):
                // confirm the returned-false path intentionally drops the popped job from the stack
                if (!sqlDao.lockSite(responseConfig.SiteCode, responseConfig.QueryConfigurations.RootNode.Value.File,
                                     request.HostName, Convert.ToInt16(request.ListeningPort)))
                {
                    //LOG.Debug("Unable to lock the new job for the client! Tell them we couldn't assign a new job");
                    response.MessageType = MessageTypeTO.Error;
                    response.Error       = "Unable to lock site!";
                    return(response);
                }
            }
            catch (Exception exc)
            {
                WorkStack.Push(responseConfig); // put it back on stack if error occured

                response.MessageType = MessageTypeTO.Error;
                response.Error       = exc.ToString();
                //LOG.Error(exc);
                return(response);
            }

            // job locked successfully - record it as active and describe the extractor assignment
            ActiveExtractions.Push(responseConfig);
            response.Configuration = responseConfig;
            response.Extractor     = new Extractor(request.HostName, request.ListeningPort,
                                                   response.Configuration.SiteCode, response.Configuration.QueryConfigurations.RootNode.Value.File, DateTime.Now);

            Extractors.Add(response.Extractor);
            //LOG.Debug("Successfully obtained a new job for our client and locked it in the database! Sending...");
            response.Extractor.Configuration = responseConfig;

            // now passing the extractor a directory name in which to dump all extractions
            response.Message = this.BatchDirectory;
            // end passing dir name

            /// TBD - maybe provide a start value in the event a vista extraction stopped in the middle of a file and
            /// the extractor
            //try
            //{
            //    FileDao fileDao = new FileDao();
            //    string lastFileIen = fileDao.getLastIen(response.Configuration.SiteCode, response.Configuration.VistaFile);
            //    int lastIen = Convert.ToInt32(lastFileIen);
            //    if (lastIen > 0)
            //    {
            //        response.Extractor.
            //    }
            //}
            //    catch (Exception exc)
            //    {

            //    }

            return(response);
        }
Code example #10
        /// <summary>
        /// Handles a client's job-error report: records the failed configuration, saves the
        /// extractor's report (if one was sent), unlocks the site/file so another client can
        /// retry, and checks whether the site's batch is now complete.
        /// </summary>
        /// <param name="request">Request carrying the failed configuration and optionally an ExtractorReport</param>
        /// <returns>JobErrorResponse on success, or an Error response describing what went wrong</returns>
        public MessageTO getJobErrorResponse(MessageTO request)
        {
            MessageTO response = new MessageTO();

            response.MessageType = MessageTypeTO.JobErrorResponse;
            if (request == null || request.Configuration == null || !request.Configuration.isCompleteConfiguration())
            {
                response.MessageType = MessageTypeTO.Error;
                response.Error       = "Incomplete ExtractorConfiguration parameter on request";
                logging.Log.LOG("The ExtractorConfiguration object sent to the job error request handler is incomplete! Unable to process request");
                return(response);
            }
            if (WorkStack == null)
            {
                logging.Log.LOG("The work stack is null for the job error request. Was everything setup correctly?");
                response.MessageType = MessageTypeTO.Error;
                response.Error       = "WorkStack has not been initialized";
                return(response);
            }

            // the remove function keys off the site code and vista file - those are the only params we need
            Extractors.Remove(new Extractor("", 0, request.Configuration.SiteCode, request.Configuration.QueryConfigurations.RootNode.Value.File, DateTime.Now));
            ActiveExtractions.Remove(request.Configuration);
            // errored jobs are deliberately NOT pushed back on the work stack, so patterns in
            // failing site codes can be observed (see the removed bug-fix commentary in history)
            ErroredExtractions.Push(request.Configuration);

            try
            {
                String  sqlProvider      = ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlProvider];
                String  connectionString = ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlConnectionString];
                ISqlDao sqlDao           = new SqlDaoFactory().getSqlDao(new SqlConnectionFactory().getConnection(sqlProvider, connectionString));
                if (request.ExtractorReport != null)
                {
                    try
                    {
                        logging.Log.LOG("Saving error report...");
                        sqlDao.saveReport(request.ExtractorReport);
                    }
                    catch (Exception exc)
                    {
                        // best-effort: a failed report save must not prevent unlocking the site,
                        // but it should no longer be silently swallowed (matches job-complete handler)
                        logging.Log.LOG("Unable to save an extractor's report! " + exc.Message);
                    }
                }
                sqlDao.unlockSite(request.Configuration.SiteCode, request.Configuration.QueryConfigurations.RootNode.Value.File);

                // should check site completion on job error reports too - but only when a report was
                // actually sent (a null ExtractorReport previously threw a NullReferenceException here)
                if (request.ExtractorReport != null)
                {
                    try
                    {
                        checkSiteCompleteAndTrigger(request.ExtractorReport.BatchId, request.Configuration.SiteCode, request.Configuration.QueryConfigurations.RootNode.Value.File);
                    }
                    catch (Exception te)
                    { /* problems with trigger file shouldn't affect extractor */
                        logging.Log.LOG("An error occured when checking trigger file creation criteria: " + te.ToString());
                    }
                }
            }
            catch (Exception exc)
            {
                logging.Log.LOG("The call to unlock the extraction job for site " + request.Configuration.SiteCode +
                                ", file " + request.Configuration.QueryConfigurations.RootNode.Value.File + " has failed unexpectedly");
                logging.Log.LOG(exc.Message);

                response.MessageType = MessageTypeTO.Error;
                response.Error       = exc.ToString();
                return(response);
            }
            return(response);
        }
Code example #11
        /// <summary>
        /// Handles a client's job-completed report: saves the extractor's report and last-IEN
        /// tracking info, unlocks the site/file, moves the configuration from active to completed,
        /// checks site completion, and kicks off post processing on a background thread.
        /// </summary>
        /// <param name="request">Request carrying the completed configuration and its ExtractorReport</param>
        /// <returns>JobCompletedResponse on success, or an Error response on failure</returns>
        public MessageTO getJobCompletedResponse(MessageTO request)
        {
            MessageTO response = new MessageTO();

            response.MessageType = MessageTypeTO.JobCompletedResponse;

            // guard - also check Configuration itself for null, which was previously dereferenced unchecked
            if (request == null || request.Configuration == null ||
                String.IsNullOrEmpty(request.Configuration.SiteCode) ||
                String.IsNullOrEmpty(request.Configuration.QueryConfigurations.RootNode.Value.File))
            {
                logging.Log.LOG("Problem with job complete request!");
                response.MessageType = MessageTypeTO.Error;
                response.Error       = "Invalid request!";
                return(response);
            }

            try
            {
                String  sqlProvider      = ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlProvider];
                String  connectionString = ConfigurationManager.AppSettings[config.AppConfigSettingsConstants.SqlConnectionString];
                ISqlDao sqlDao           = new SqlDaoFactory().getSqlDao(new SqlConnectionFactory().getConnection(sqlProvider, connectionString));
                if (request.ExtractorReport != null)
                {
                    try
                    {
                        logging.Log.LOG("Saving successful job report...");
                        sqlDao.saveReport(request.ExtractorReport);
                    }
                    catch (Exception exc)
                    {
                        logging.Log.LOG("Unable to save extractors report: " + request.Configuration.SiteCode + " - " + request.Configuration.QueryConfigurations.RootNode.Value.File + ", " + exc.Message);
                    }
                    // get last IEN from message and save to tracking table. Guarded by the null check:
                    // a null ExtractorReport previously threw BEFORE unlockSite below, leaving the
                    // site permanently locked
                    sqlDao.saveLastIen(request.Configuration.SiteCode, request.Configuration.QueryConfigurations.RootNode.Value.File, request.ExtractorReport.StartIen, request.ExtractorReport.LastIen, request.ExtractorReport.BatchId);
                }
                // unlock site from locks table - always, even without a report
                sqlDao.unlockSite(request.Configuration.SiteCode, request.Configuration.QueryConfigurations.RootNode.Value.File);
                // the remove function keys off the site code and vista file - those are the only params we need
                Extractors.Remove(new Extractor("", 0, request.Configuration.SiteCode, request.Configuration.QueryConfigurations.RootNode.Value.File, DateTime.Now));
                CompletedExtractions.Push(request.Configuration);
                ActiveExtractions.Remove(request.Configuration);
                // all done with site?
                if (request.ExtractorReport != null)
                {
                    try
                    {
                        checkSiteCompleteAndTrigger(request.ExtractorReport.BatchId, request.Configuration.SiteCode, request.Configuration.QueryConfigurations.RootNode.Value.File);
                    }
                    catch (Exception triggerExc) // shouldn't fail job complete request if there is a problem with this - we just won't create the trigger
                    {
                        logging.Log.LOG("An unexpected error occured when checking if it was ok to create the trigger for site " + request.Configuration.SiteCode + "\r\n\r\n" + triggerExc.ToString());
                    }
                }
                response.Message = "Nice Work!";
            }
            catch (Exception exc)
            {
                logging.Log.LOG(exc.Message);
                response.MessageType = MessageTypeTO.Error;
                response.Error       = exc.ToString();
            }

            // handle post process in separate thread
            Thread postProcessThread = new Thread(new ParameterizedThreadStart(RequestHandler.getInstance().postProcess));

            postProcessThread.Start(request);

            return(response);
        }
Code example #12
        /// <summary>
        /// Hybrid BCMA (file 53.79) extraction: obtains the site's IEN range from CDW, splits the
        /// range across parallel worker tasks extracting the top-level file from Vista, then builds
        /// the CDW subfile tables and records the extracted range in the IEN tracking table.
        /// </summary>
        /// <param name="args">args[0] is the site code; remaining elements are unused here</param>
        static void extractBcmaFromCdw(string[] args)
        {
            String sitecode = args[0];
            // Key = first IEN, Value = last IEN for this site per CDW
            KeyValuePair <String, String> range = getRangeFromCdw(sitecode);

            //String startIenFirstWorker = args[1];
            //String lastIenFromCdw = getGreatestCdwIen(sitecode);
            logging.Log.LOG(String.Format("IEN range from CDW for {0} : {1} to {2}", sitecode, range.Key, range.Value));

            // NOTE(review): worker count is hard-coded - consider making configurable
            Int32 workerThreads = 8;

            // one start IEN per worker; consecutive entries bound each worker's chunk
            IList <String> startPoints = getStartPointsForWorkerThreads(range.Key, range.Value, workerThreads);

            ExtractorConfiguration config = getExtractorConfig("53.79");

            // strip identifiers/children so only the top-level file is extracted here;
            // subfiles are handled later via createBcmaSubFileTables
            config.QueryConfigurations.RootNode.Value.IdentifiedFiles = config.QueryConfigurations.RootNode.Value.Identifier = "";
            config.QueryConfigurations.RootNode.Children = new List <TreeNode <QueryConfiguration> >();
            config.SiteCode = config.Sites = sitecode;

            logging.Log.LOG(String.Format("Getting ready to use {0} workers to extract top level file from Vista", workerThreads.ToString()));

            IList <Task> allTasks = new List <Task>();

            // chunk i runs [startPoints[i], startPoints[i+1]); locals are declared inside the loop
            // so each task's lambda captures its own start/end (no shared-closure bug)
            for (int i = 0; i < startPoints.Count - 1; i++)
            {
                String start = startPoints[i];
                String end   = startPoints[i + 1];
                Task   vs    = new Task(() => extractWithStopIen(config, start, end));
                allTasks.Add(vs);
                vs.Start();
            }

            // final chunk covers [last start point, end of range]
            Task lastChunk = new Task(() => extractWithStopIen(config, startPoints[startPoints.Count - 1], range.Value));

            allTasks.Add(lastChunk);
            lastChunk.Start();

            // block until every worker finishes before moving on to subfiles
            foreach (Task t in allTasks)
            {
                t.Wait();
            }

            logging.Log.LOG("Finished extracting top level file from Vista!! Only CDW subfiles left...");

            MdoVistaDao.getInstance().shutdownPool(); // don't need our vista cxns any more - let's be nice and shutdown our connections

            try
            {
                createBcmaSubFileTables(sitecode, range.Key, range.Value);
            }
            catch (Exception exc)
            {
                logging.Log.LOG(exc.ToString());
            }

            try
            {
                logging.Log.LOG("Getting ready to update IEN tracking table...");
                ISqlDao dao = new SqlDaoFactory().getSqlDao(new SqlConnectionFactory().getConnection(ConfigurationManager.AppSettings["SqlProvider"], ConfigurationManager.AppSettings["SqlConnectionString"]));
                // NOTE(review): "20010101125959" looks like a sentinel batch id for hybrid CDW
                // extractions - confirm against the tracking table's conventions
                dao.saveLastIen(sitecode, "53.79", range.Key, range.Value, "20010101125959");
                logging.Log.LOG("Successfully updated IEN tracking table!");
            }
            catch (Exception exc)
            {
                logging.Log.LOG(String.Format("Oh geez... there was a problem updating the IEN tracking table with start IEN {0} and stop IEN {1} for site {2}", range.Key, range.Value, sitecode) + " - " + exc.Message);
            }

            logging.Log.LOG("Finished with hybrid BCMA extraction. Wicked cool.");

            return;
        }