/// <summary>
/// Reads the tab-delimited import file and returns its header columns and data rows.
/// </summary>
/// <param name="start_on_line">1-based line number where the header row begins; earlier lines are skipped.</param>
/// <returns>An ImportDataResult with the column names and one JObject per data row.</returns>
/// <exception cref="InvalidOperationException">Thrown when the file ends before the header row.</exception>
internal ImportDataResult getImportDataResult(int start_on_line = 1)
{
    ImportDataResult dataresult = new ImportDataResult();
    var rows = new List<JObject>();
    var num_to_skip = start_on_line - 1;
    List<string> columns = new List<string>();

    using (var reader = new StreamReader(fileName))
    {
        for (int i = 0; i < num_to_skip; i++)
        {
            reader.ReadLine(); //skip one line.
        }

        // BUGFIX: ReadLine() returns null at end-of-stream; the old code crashed with a
        // NullReferenceException when the file was shorter than start_on_line.
        string headerLine = reader.ReadLine();
        if (headerLine == null)
            throw new InvalidOperationException("No header row found at line " + start_on_line + ".");

        string[] headers = headerLine.Split('\t');
        foreach (string header in headers)
        {
            // Blank header cells get a placeholder name; embedded quotes are stripped.
            string header_val = string.IsNullOrEmpty(header) ? "-blank-" : header;
            columns.Add(header_val.Replace("\"", ""));
        }
        dataresult.columns = columns;

        //now we have the columns... lets get the rows!
        string wholeline;
        string[] line;
        while ((wholeline = reader.ReadLine()) != null) //NOTE: reader.EndOfStream does NOT work here. :(
        {
            line = wholeline.Split('\t');
            var idx = 0;
            var row = new JObject();
            //iterate our columns and copy
            foreach (var column in columns)
            {
                // BUGFIX: a row with fewer cells than headers used to throw
                // IndexOutOfRangeException; missing cells now become "".
                row.Add(new JProperty(column, idx < line.Length ? line[idx] : ""));
                idx++;
            }
            rows.Add(row);
        }
        dataresult.rows = rows;
    }
    return (dataresult);
}
/// <summary>
/// Reads a tab-delimited import file and builds the column list plus one JObject per data row.
/// </summary>
/// <param name="start_on_line">1-based line number of the header row; preceding lines are skipped.</param>
/// <returns>An ImportDataResult populated with columns and rows.</returns>
/// <exception cref="InvalidOperationException">Thrown when the file ends before the header row.</exception>
internal ImportDataResult getImportDataResult(int start_on_line = 1)
{
    ImportDataResult dataresult = new ImportDataResult();
    var rows = new List<JObject>();
    var columns = new List<string>();

    using (var reader = new StreamReader(fileName))
    {
        // Skip any lines that precede the header row.
        for (var skipped = 0; skipped < start_on_line - 1; skipped++)
            reader.ReadLine(); //skip one line.

        // BUGFIX: ReadLine() returns null at end-of-stream; the old code crashed with a
        // NullReferenceException when the file was shorter than start_on_line.
        var headerLine = reader.ReadLine();
        if (headerLine == null)
            throw new InvalidOperationException("No header row found at line " + start_on_line + ".");

        foreach (var header in headerLine.Split('\t'))
        {
            // Blank header cells get a placeholder name; embedded quotes are stripped.
            var headerVal = string.IsNullOrEmpty(header) ? "-blank-" : header;
            columns.Add(headerVal.Replace("\"", ""));
        }
        dataresult.columns = columns;

        //now we have the columns... lets get the rows!
        string wholeline;
        while ((wholeline = reader.ReadLine()) != null) //NOTE: reader.EndOfStream does NOT work here. :(
        {
            var cells = wholeline.Split('\t');
            var idx = 0;
            var row = new JObject();
            //iterate our columns and copy
            foreach (var column in columns)
            {
                // BUGFIX: a data row with fewer cells than headers used to throw
                // IndexOutOfRangeException; missing cells now become "".
                row.Add(new JProperty(column, idx < cells.Length ? cells[idx] : ""));
                idx++;
            }
            rows.Add(row);
        }
        dataresult.rows = rows;
    }
    return dataresult;
}
// Parses a PITAGIS tagging file. The file opens with a ~24-line "header" section of
// "NAME : VALUE" lines, for example:
//   FILE TYPE           : TAGGING
//   TAG DATE            : 06/18/14 10:28
//   RELEASE RIVER KM    : 465.127.014
// followed by a blank line, then a fixed-width data section, for example:
//   4  384.3B23AF7939     95     9.3      32W   ||Unit 2 Scale 42
// (RECORD_ID, PIT_TAG_NUMBER, FORK_LENGTH, WEIGHT, TYPE_RUN_REARING, then pipe-delimited comments)
internal ImportDataResult getImportDataResult()
{
    ImportDataResult dataresult = new ImportDataResult();
    var rows = new List<Dictionary<string, string>>();
    dataresult.columns = colNames;

    using (var reader = new StreamReader(fileName))
    {
        string line = "";

        // Inject some default values
        Store(new Tuple<string, string>(FISH_COUNT, "1"));

        // Skip any leading blank lines... shouldn't be any, but you never know.
        // BUGFIX: ReadLine() returns null at end-of-stream; the old code threw
        // NullReferenceException on line.Trim() for empty or truncated files.
        while (line != null && line.Trim().IsEmpty())
            line = reader.ReadLine();

        // Now process header lines until we hit another blank line (or EOF)
        while (line != null && !line.Trim().IsEmpty())
        {
            Store(GetNameAndValue(line));
            line = reader.ReadLine();
        }

        // Fix up dates in the header
        StandardizeDateTime("TAG DATE");
        StandardizeDateTime("RELEASE DATE");

        // After the 2nd blank line, we're in the data section... Read until we hit a non-blank.
        while (line != null && line.Trim().IsEmpty())
            line = reader.ReadLine();

        // Again process until we hit a blank line (or EOF)
        while (line != null && !line.Trim().IsEmpty())
        {
            // Fixed-width fields; SafeField tolerates short/truncated data lines.
            Store(new Tuple<string, string>(RECORD_ID, SafeField(line, 0, 4)));
            Store(new Tuple<string, string>(PIT_TAG_NUMBER, SafeField(line, 5, 15)));
            Store(new Tuple<string, string>(FORK_LENGTH, SafeField(line, 23, 6)));
            Store(new Tuple<string, string>(WEIGHT, SafeField(line, 30, 9)));
            Store(new Tuple<string, string>(TYPE_RUN_REARING, SafeField(line, 40, 4)));

            // Now comes the pipe-delimited stuff...
            var rest = SafeField(line, 45, int.MaxValue).Split('|');
            Store(new Tuple<string, string>(COMMENT_1, rest.Length > 0 ? rest[0] : ""));
            Store(new Tuple<string, string>(COMMENT_2, rest.Length > 1 ? rest[1] : ""));
            Store(new Tuple<string, string>(COMMENT_3, rest.Length > 2 ? rest[2] : ""));

            // Snapshot the accumulated column values into a row, applying header renames.
            var row = new Dictionary<string, string>();
            for (var i = 0; i < colNames.Count; i++)
            {
                var colName = RenameHeaderValue(colNames[i]);
                row[colName] = colVals[i];
            }
            rows.Add(row);

            line = reader.ReadLine();
        }
    }

    RenameHeaders();
    dataresult.columns = colNames;
    dataresult.rows = rows;
    return dataresult;
}

// Extracts a trimmed fixed-width field from a data line. Returns "" instead of
// throwing ArgumentOutOfRangeException when the line is shorter than expected
// (the old inline Substring calls crashed on truncated lines).
private static string SafeField(string s, int start, int length)
{
    if (s == null || start >= s.Length)
        return "";
    if (length > s.Length - start)
        length = s.Length - start;
    return s.Substring(start, length).Trim();
}
/// <summary>
/// Accepts a multipart file upload, relocates it into the project's upload folder, parses it
/// (Excel, CSV, or TSV), and returns the parsed columns/rows serialized as JSON.
/// </summary>
/// <returns>A task producing an HTTP 200 response whose body is the serialized ImportDataResult.</returns>
/// <exception cref="HttpResponseException">415 when the request is not multipart; 500 when reading the upload fails.</exception>
public Task<HttpResponseMessage> UploadImportFile()
{
    logger.Debug("starting to process incoming files.");

    if (!Request.Content.IsMimeMultipartContent())
    {
        throw new HttpResponseException(HttpStatusCode.UnsupportedMediaType);
    }

    string root = System.Web.HttpContext.Current.Server.MapPath("~/uploads");
    string rootUrl = Request.RequestUri.AbsoluteUri.Replace(Request.RequestUri.AbsolutePath, String.Empty);
    logger.Debug("saving files to location: " + root);
    logger.Debug(" and the root url = " + rootUrl);

    var provider = new MultipartFormDataStreamProvider(root);
    User me = AuthorizationManager.getCurrentUser();
    var db = ServicesContext.Current;

    var task = Request.Content.ReadAsMultipartAsync(provider).
        ContinueWith<HttpResponseMessage>(o =>
        {
            if (o.IsFaulted || o.IsCanceled)
            {
                // BUGFIX: o.Exception is null when the task was canceled rather than faulted;
                // the old code dereferenced it unconditionally and threw NullReferenceException.
                if (o.Exception != null)
                {
                    logger.Debug("Error: " + o.Exception.Message);
                    throw new HttpResponseException(Request.CreateErrorResponse(HttpStatusCode.InternalServerError, o.Exception));
                }
                logger.Debug("Error: the upload task was canceled.");
                throw new HttpResponseException(Request.CreateErrorResponse(HttpStatusCode.InternalServerError, "The upload was canceled."));
            }

            //Look up our project
            Int32 ProjectId = Convert.ToInt32(provider.FormData.Get("ProjectId"));
            logger.Debug("And we think the projectid === " + ProjectId);
            Project project = db.Projects.Find(ProjectId);

            // BUGFIX: Find returns null for an unknown id; fail with a clear message
            // instead of a NullReferenceException inside isOwnerOrEditor.
            if (project == null)
                throw new Exception("Project " + ProjectId + " was not found.");
            if (!project.isOwnerOrEditor(me))
                throw new Exception("Authorization error.");

            var newFileName = "";
            foreach (MultipartFileData file in provider.FileData)
            {
                logger.Debug("Filename = " + file.LocalFileName);
                logger.Debug("Orig = " + file.Headers.ContentDisposition.FileName);
                logger.Debug("Name? = " + file.Headers.ContentDisposition.Name);

                var filename = file.Headers.ContentDisposition.FileName;
                filename = filename.Replace("\"", string.Empty);

                if (!String.IsNullOrEmpty(filename))
                {
                    try
                    {
                        newFileName = ActionController.relocateProjectFile(
                            file.LocalFileName, ProjectId, filename, true);
                    }
                    catch (Exception e)
                    {
                        // Best-effort: a failed relocation is logged; remaining files still process.
                        logger.Debug("Error: " + e.ToString());
                    }
                }
            }
            logger.Debug("Done saving files.");

            // BUGFIX: new FileInfo("") throws ArgumentException; report clearly when no
            // file survived the relocation step above.
            if (String.IsNullOrEmpty(newFileName))
                throw new Exception("No import file was uploaded.");

            ImportDataResult data = new ImportDataResult();
            var info = new System.IO.FileInfo(newFileName);

            //process the file and return all the data!
            //TODO: refactor this into import plugins via polymorphism. ... but maybe this is enough. :)
            //CSV or Excel are the only filetypes currently supported.
            // BUGFIX: compare extensions case-insensitively (".XLS" used to fall through to the error).
            var extension = info.Extension.ToLower();
            if (extension == ".xls" || extension == ".xlsx")
            {
                logger.Debug("Looks like an excel file!");
                ExcelReader reader = new ExcelReader(newFileName);
                //ExcelReader doesn't support starting on a certain line for column names... we always assume col 1
                data.columns = reader.getColumns();
                data.rows = reader.getData().First().Table;
                reader.close();
            }
            else if (extension == ".csv")
            {
                logger.Debug("Looks like a csv file!");
                Int32 StartOnLine = Convert.ToInt32(provider.FormData.Get("StartOnLine")); //only applicable to T/CSV
                CSVReader reader = new CSVReader(newFileName);
                data = reader.getImportDataResult(StartOnLine); // we do it all in one.
            }
            else if (extension == ".tsv")
            {
                logger.Debug("Looks like a tsv file!");
                Int32 StartOnLine = Convert.ToInt32(provider.FormData.Get("StartOnLine")); //only applicable to T/CSV
                TSVReader reader = new TSVReader(newFileName);
                data = reader.getImportDataResult(StartOnLine); // we do it all in one.
            }
            else
            {
                logger.Debug("Looks like an unknown file!");
                throw new Exception("File type not compatible. We can do Excel (xls/xslx), CSV (csv) and TSV (tsv).");
            }

            string result = JsonConvert.SerializeObject(data);
            //TODO: actual error/success message handling
            HttpResponseMessage resp = new HttpResponseMessage(System.Net.HttpStatusCode.OK);
            resp.Content = new System.Net.Http.StringContent(result, System.Text.Encoding.UTF8, "text/plain"); //to stop IE from being stupid.
            return resp;
        });

    return task;
}
// Parses a PITAGIS tagging file: a ~24-line header section of "NAME : VALUE" lines
// (FILE TYPE, TAG DATE, TAGGER, RELEASE SITE, etc.), then a blank line, then a
// fixed-width data section such as:
//   4  384.3B23AF7939     95     9.3      32W   ||Unit 2 Scale 42
// which maps to RECORD_ID, PIT_TAG_NUMBER, FORK_LENGTH, WEIGHT, TYPE_RUN_REARING,
// followed by up to three pipe-delimited comment fields.
internal ImportDataResult getImportDataResult()
{
    ImportDataResult dataresult = new ImportDataResult();
    var rows = new List<Dictionary<string, string>>();
    dataresult.columns = colNames;

    using (var reader = new StreamReader(fileName))
    {
        string line = "";

        // Inject some default values
        Store(new Tuple<string, string>(FISH_COUNT, "1"));

        // Skip any leading blank lines... shouldn't be any, but you never know.
        // BUGFIX: ReadLine() returns null at end-of-stream; the old code threw
        // NullReferenceException on line.Trim() for empty or truncated files.
        while (line != null && line.Trim().IsEmpty())
        {
            line = reader.ReadLine();
        }

        // Now process header lines until we hit another blank line (or EOF)
        while (line != null && !line.Trim().IsEmpty())
        {
            Store(GetNameAndValue(line));
            line = reader.ReadLine();
        }

        // Fix up dates in the header
        StandardizeDateTime("TAG DATE");
        StandardizeDateTime("RELEASE DATE");

        // After the 2nd blank line, we're in the data section... Read until we hit a non-blank.
        while (line != null && line.Trim().IsEmpty())
        {
            line = reader.ReadLine();
        }

        // Again process until we hit a blank line (or EOF)
        while (line != null && !line.Trim().IsEmpty())
        {
            // Fixed-width fields; FixedField tolerates short/truncated data lines.
            Store(new Tuple<string, string>(RECORD_ID, FixedField(line, 0, 4)));
            Store(new Tuple<string, string>(PIT_TAG_NUMBER, FixedField(line, 5, 15)));
            Store(new Tuple<string, string>(FORK_LENGTH, FixedField(line, 23, 6)));
            Store(new Tuple<string, string>(WEIGHT, FixedField(line, 30, 9)));
            Store(new Tuple<string, string>(TYPE_RUN_REARING, FixedField(line, 40, 4)));

            // Now comes the pipe-delimited stuff...
            var rest = FixedField(line, 45, int.MaxValue).Split('|');
            Store(new Tuple<string, string>(COMMENT_1, rest.Length > 0 ? rest[0] : ""));
            Store(new Tuple<string, string>(COMMENT_2, rest.Length > 1 ? rest[1] : ""));
            Store(new Tuple<string, string>(COMMENT_3, rest.Length > 2 ? rest[2] : ""));

            // Snapshot the accumulated column values into a row, applying header renames.
            var row = new Dictionary<string, string>();
            for (var i = 0; i < colNames.Count; i++)
            {
                var colName = RenameHeaderValue(colNames[i]);
                row[colName] = colVals[i];
            }
            rows.Add(row);

            line = reader.ReadLine();
        }
    }

    RenameHeaders();
    dataresult.columns = colNames;
    dataresult.rows = rows;
    return (dataresult);
}

// Extracts a trimmed fixed-width field from a data line. Returns "" instead of
// throwing ArgumentOutOfRangeException when the line is shorter than expected
// (the old inline Substring calls crashed on truncated lines).
private static string FixedField(string s, int start, int length)
{
    if (s == null || start >= s.Length)
    {
        return "";
    }
    if (length > s.Length - start)
    {
        length = s.Length - start;
    }
    return s.Substring(start, length).Trim();
}
/// <summary>
/// Accepts a multipart file upload, relocates it into the project's upload folder, parses it
/// (Excel, CSV, TSV, or PITAGIS), and returns the parsed columns/rows serialized as JSON.
/// </summary>
/// <returns>A task producing an HTTP 200 response whose body is the serialized ImportDataResult.</returns>
/// <exception cref="HttpResponseException">415 when the request is not multipart; 500 when reading the upload fails.</exception>
public Task<HttpResponseMessage> UploadImportFile()
{
    logger.Debug("Inside DataActionController, UploadImportFile...");
    logger.Debug("starting to process incoming files.");

    if (!Request.Content.IsMimeMultipartContent())
    {
        throw new HttpResponseException(HttpStatusCode.UnsupportedMediaType);
    }

    string root = System.Web.HttpContext.Current.Server.MapPath("~/uploads");
    string rootUrl = Request.RequestUri.AbsoluteUri.Replace(Request.RequestUri.AbsolutePath, String.Empty);
    logger.Debug("saving files to location: " + root);
    logger.Debug(" and the root url = " + rootUrl);

    var provider = new MultipartFormDataStreamProvider(root);
    User me = AuthorizationManager.getCurrentUser();
    var db = ServicesContext.Current;

    var task = Request.Content.ReadAsMultipartAsync(provider).ContinueWith(o =>
    {
        if (o.IsFaulted || o.IsCanceled)
        {
            // BUGFIX: o.Exception is null when the task was canceled rather than faulted;
            // the old code dereferenced it unconditionally and threw NullReferenceException.
            if (o.Exception != null)
            {
                logger.Debug("Error: " + o.Exception.Message);
                throw new HttpResponseException(Request.CreateErrorResponse(HttpStatusCode.InternalServerError, o.Exception));
            }
            logger.Debug("Error: the upload task was canceled.");
            throw new HttpResponseException(Request.CreateErrorResponse(HttpStatusCode.InternalServerError, "The upload was canceled."));
        }

        //Look up our project
        Int32 ProjectId = Convert.ToInt32(provider.FormData.Get("ProjectId"));
        logger.Debug("And we think the projectid === " + ProjectId);
        Project project = db.Projects.Find(ProjectId);
        logger.Debug("Project = " + project);

        // BUGFIX: Find returns null for an unknown id; the old code logged the null and then
        // crashed with NullReferenceException inside isOwnerOrEditor.
        if (project == null)
            throw new Exception("Project " + ProjectId + " was not found.");

        if (!project.isOwnerOrEditor(me))
            throw new Exception("Authorization error: The user trying to import is neither an Owner nor an Editor.");
        else
            logger.Debug("User authorized = " + me);

        var newFileName = "";
        foreach (MultipartFileData file in provider.FileData)
        {
            logger.Debug("Filename = " + file.LocalFileName);
            logger.Debug("Orig = " + file.Headers.ContentDisposition.FileName);
            logger.Debug("Name? = " + file.Headers.ContentDisposition.Name);

            var filename = file.Headers.ContentDisposition.FileName;
            filename = filename.Replace("\"", string.Empty);

            if (!String.IsNullOrEmpty(filename))
            {
                try
                {
                    newFileName = ActionController.relocateProjectFile(
                        file.LocalFileName, ProjectId, filename, true);
                }
                catch (Exception e)
                {
                    // Best-effort: a failed relocation is logged; remaining files still process.
                    logger.Debug("Error: " + e.ToString());
                }
            }
        }
        logger.Debug("Done saving files.");

        // BUGFIX: new FileInfo("") throws ArgumentException; report clearly when no
        // file survived the relocation step above.
        if (String.IsNullOrEmpty(newFileName))
            throw new Exception("No import file was uploaded.");

        var data = new ImportDataResult();
        var info = new FileInfo(newFileName);

        // Process the file and return all the data!
        /* Note: According to Colette, if someone tries to upload a file with an odd extension
         * (.lkg, .fld, MCR, BC1, etc.), while the extension may vary, it will almost always be
         * a ScrewTrap-PITAGIS related file. Therefore, we are allowing a wide range of
         * variation in the extensions. */
        var regexNums = new Regex(@"\.(m|r|ur|mc|bc|nb)\d+$");
        var regexChars = new Regex(@"\.(m|r|ur|mc|bc|nb)\D+$");

        var extension = info.Extension.ToLower();
        logger.Debug("extension = " + extension);

        if (extension == ".xls" || extension == ".xlsx")
        {
            logger.Debug("Looks like an excel file!");
            var reader = new ExcelReader(newFileName);
            //ExcelReader doesn't support starting on a certain line for column names... we always assume col 1
            data.columns = reader.getColumns();
            data.rows = reader.getData().First().Table;
            reader.close();
        }
        else if (extension == ".csv")
        {
            logger.Debug("Looks like a csv file!");
            var StartOnLine = Convert.ToInt32(provider.FormData.Get("StartOnLine")); //only applicable to T/CSV
            var reader = new CSVReader(newFileName);
            data = reader.getImportDataResult(StartOnLine); // we do it all in one.
        }
        else if (extension == ".tsv")
        {
            logger.Debug("Looks like a tsv file!");
            var StartOnLine = Convert.ToInt32(provider.FormData.Get("StartOnLine")); //only applicable to T/CSV
            var reader = new TSVReader(newFileName);
            data = reader.getImportDataResult(StartOnLine); // we do it all in one.
        }
        else if (extension == ".lkg" || extension == ".fld" || regexNums.Match(extension).Success || regexChars.Match(extension).Success)
        {
            logger.Debug("Looks like a PITAGIS file!");
            var reader = new PitagisReader(newFileName);
            data = reader.getImportDataResult(); // we do it all in one.
        }
        else
        {
            logger.Debug("Looks like an unknown file!");
            throw new Exception("File type not compatible. We can do Excel (xls/xslx), CSV (csv), TSV (tsv), and PITAGIS (.lkg/.fld/.m01/.r01/.ur1/.mc1).");
        }

        var result = JsonConvert.SerializeObject(data);
        //TODO: actual error/success message handling
        var resp = new HttpResponseMessage(HttpStatusCode.OK);
        resp.Content = new StringContent(result, System.Text.Encoding.UTF8, "text/plain"); //to stop IE from being stupid.
        return resp;
    });

    return task;
}