public void Before(ExportInitializationResults results)
{
    // Reject a negative extracted-text index before the export starts;
    // the index must point into the field collection returned by the query.
    if (!(ExtractedTextIndex >= 0))
    {
        string message =
            $"The specified extracted text index {this.ExtractedTextIndex} is not valid. " +
            "Please specify the index of the extracted text field in the returned collection";
        throw new InvalidOperationException(message);
    }

    // Report the run metadata produced by export initialization.
    Console.WriteLine($"Current itemCount: {results.RecordCount}");
    Console.WriteLine($"Run ID: {results.RunID}");
}
void Run(string[] args)
{
    // Top-level catch-all: C# has no checked exceptions, so this is the
    // safety net for anything the inner handlers miss.
    try
    {
        // Get an instance of the Object Manager.
        IObjectManager objectManager;
        try
        {
            objectManager = GetKeplerServiceFactory()
                .CreateProxy<Relativity.Services.Objects.IObjectManager>();
        }
        catch (Exception exception)
        {
            Console.WriteLine(exception.Message);
            return;
        }

        // Initialize Export API using the properties set above.
        Guid runId;
        long recordCount;
        List<FieldMetadata> fieldData;
        try
        {
            ExportInitializationResults exportInitializationResults =
                objectManager.InitializeExportAsync(WorkspaceId, QueryRequest, 0).Result;

            // Save information about this "run".
            runId = exportInitializationResults.RunID;
            recordCount = exportInitializationResults.RecordCount;
            fieldData = exportInitializationResults.FieldData;
        }
        catch (Exception exception)
        {
            Console.WriteLine(exception.Message);
            return;
        }

        Console.WriteLine("RunId " + runId + " will return " + recordCount + " documents");
        Console.WriteLine();

        // Get blocks of documents until no more are left.
        while (true)
        {
            RelativityObjectSlim[] currentBlock;
            try
            {
                currentBlock = objectManager.RetrieveNextResultsBlockFromExportAsync(
                    WorkspaceId, runId, 10).Result;
            }
            catch (Exception exception)
            {
                Console.WriteLine(exception.Message);
                return;
            }

            // A null or empty block signals that the export is exhausted.
            if (currentBlock == null || !currentBlock.Any())
            {
                break;
            }

            // Use Length (array property) rather than the LINQ Count() extension.
            Console.WriteLine("Got block of " + currentBlock.Length + " documents");
            Console.WriteLine();

            // Print out each document's fields.
            foreach (RelativityObjectSlim ros in currentBlock)
            {
                for (int i = 0; i < fieldData.Count; i++)
                {
                    Console.WriteLine(fieldData[i].Name + ": " + ros.Values[i]);

                    /*
                     * // If this field is long text and it contains
                     * // only the streaming marker then stream.
                     * if (fieldData[i].FieldType == Relativity.Services.FieldType.LongText
                     *     && ros.Values[i].Equals(_SHIBBOLETH))
                     * {
                     *     Console.WriteLine("Text is too long, it must be streamed");
                     *     Console.WriteLine();
                     *
                     *     RelativityObjectRef documentObjectRef = new RelativityObjectRef { ArtifactID = ros.ArtifactID };
                     *
                     *     using (IKeplerStream keplerStream = objectManager.StreamLongTextAsync(WorkspaceId, documentObjectRef, QueryRequest.Fields.ElementAt(i)).Result)
                     *     {
                     *         using (Stream realStream = keplerStream.GetStreamAsync().Result)
                     *         {
                     *             StreamReader reader = new StreamReader(realStream, Encoding.Unicode);
                     *             String line;
                     *
                     *             while ((line = reader.ReadLine()) != null)
                     *             {
                     *                 Console.Write(line);
                     *             }
                     *             Console.WriteLine();
                     *         }
                     *     }
                     * }
                     */
                }
                Console.WriteLine();
            }

            Console.WriteLine("Block complete");
            Console.WriteLine();
        }

        Console.WriteLine("All blocks complete");
        Console.WriteLine();
    }
    catch (Exception exception)
    {
        Console.WriteLine(exception.Message);
        return;
    }
}
public void Before(ExportInitializationResults results)
{
    // Capture the field metadata so later per-item calls can map values to fields.
    this._fields = results.FieldData;

    // Announce the total item count reported by export initialization.
    Console.WriteLine($"ExportApiHandler Before called with itemCount: {results.RecordCount}");
}
public void Before(ExportInitializationResults results)
{
    // Announce that the export run is starting.
    Console.WriteLine("Before");
    Console.WriteLine();

    // Keep the field metadata for use while handling exported items.
    this._fieldData = results.FieldData;
}
/// <summary>
/// Generic export method: initializes an Object Manager export, pulls
/// result blocks of up to <paramref name="batchSize"/> documents, and
/// appends each document's field values to a CSV load file.
/// </summary>
/// <param name="objMgr">Object Manager proxy used for all export calls.</param>
/// <param name="workspaceId">Artifact ID of the workspace to export from.</param>
/// <param name="batchSize">Maximum number of documents per result block; must be positive.</param>
/// <param name="fields">Fields to export; their names become the CSV header row.</param>
/// <param name="queryRequest">Query identifying the documents to export; its Fields are overwritten here.</param>
/// <param name="outDirectory">Directory that receives the load file; deleted and recreated if it exists.</param>
private static void Export(
    IObjectManager objMgr,
    int workspaceId,
    int batchSize,
    List<Field> fields,
    QueryRequest queryRequest,
    string outDirectory)
{
    // Start from a clean output directory.
    if (Directory.Exists(outDirectory))
    {
        Console.WriteLine("Deleting " + outDirectory);
        Directory.Delete(outDirectory, true);
    }
    Directory.CreateDirectory(outDirectory);

    // Build the load file path with Path.Combine instead of manual
    // separator concatenation.
    string loadFile = Path.Combine(outDirectory, "load.csv");

    // Write the field names to the load file so they become column headings.
    // NOTE(review): no newline is appended after the header row — confirm
    // Common.AppendToLoadFileAsync starts each record on a new line.
    IEnumerable<string> fieldNames = fields.Select(x => x.Name);
    File.AppendAllText(loadFile, String.Join(",", fieldNames));

    // Convert to FieldRefs for the query.
    IEnumerable<FieldRef> fieldRefs = fields.Select(x => new FieldRef { ArtifactID = x.ArtifactID });
    queryRequest.Fields = fieldRefs;

    const int startPage = 0; // index of starting page

    // Initialize export so we know how many documents total we are exporting.
    ExportInitializationResults initResults =
        objMgr.InitializeExportAsync(workspaceId, queryRequest, startPage).Result;
    long totalCount = initResults.RecordCount;

    // Record totalCount's length (number of digits) for padded naming.
    Common.MaxDigits = Common.CountBase10Digits(totalCount);

    // Ceiling division: one extra batch when totalCount is not an exact
    // multiple of batchSize (replaces the two-step remainder check).
    long batchCountDefinitely = (totalCount + batchSize - 1) / batchSize;

    long currBatchCount = 1;
    while (true)
    {
        RelativityObjectSlim[] docBatch =
            objMgr.RetrieveNextResultsBlockFromExportAsync(workspaceId, initResults.RunID, batchSize).Result;
        if (docBatch == null || !docBatch.Any())
        {
            break;
        }

        Console.WriteLine($"Exporting batch {currBatchCount} of {batchCountDefinitely} (size {docBatch.Length}).");

        // Queue each document's values for the load file.
        foreach (RelativityObjectSlim obj in docBatch)
        {
            List<object> fieldValues = obj.Values;
            Common.AppendToLoadFileAsync(
                workspaceId,
                obj.ArtifactID,
                fields,
                fieldValues,
                loadFile);
        }

        currBatchCount++;
    }

    // Finish up the queue.
    Common.CompleteAddingBatches();
    //Directory.Delete(outDirectory, true);
}
// Orchestrates the export: initializes the run, then starts three pools of
// worker threads (block consumers, long-text streamers, handler callers) and
// shuts their shared queues down in dependency order. The exact order of the
// Join / CompleteAdding calls below is load-bearing — do not reorder.
private void Run() {
    // Create lists for the various thread types.
    List <Thread> handerCallerThreads = new List <Thread>();
    List <Thread> textStreamerThreads = new List <Thread>();
    List <Thread> blockingConsumerThreads = new List <Thread>();
    // Catch all exceptions and report them through the handler.
    try {
        // Get Object Manager.
        try {
            _objectManager = GetKeplerServiceFactory()
                .CreateProxy <Relativity.Services.Objects.IObjectManager>();
        } catch (Exception exception) {
            _userHandler.Error("GetKeplerServiceFactory().CreateProxy failed", exception);
            return;
        }
        // Initialize Query.
        ExportInitializationResults exportInitializationResults = null;
        try {
            exportInitializationResults = _objectManager.InitializeExportAsync(_workspaceId, _queryRequest, 0).Result;
        } catch (Exception exception) {
            _userHandler.Error("InitializeExportAsync failed", exception);
            return;
        }
        // Save Run info.
        _runId = exportInitializationResults.RunID;
        _recordCount = exportInitializationResults.RecordCount;
        // Find the positional indexes of all long text fields in the
        // returned field list; the streamer threads use these to know
        // which values may need to be streamed.
        List <int> longTextIds = new List <int>();
        for (int i = 0; i < exportInitializationResults.FieldData.Count; i++) {
            if (exportInitializationResults.FieldData[i].FieldType == FieldType.LongText) {
                longTextIds.Add(i);
            }
        }
        _longTextIds = longTextIds.ToArray();
        // Call the handler's Before method.
        _userHandler.Before(exportInitializationResults);
        // Create threads that read blocks of documents.
        // NOTE(review): thread name "BlockConcumer" is a typo, but it is a
        // runtime string (diagnostics) and is left unchanged here.
        for (int i = 0; i < _scaleFactor; i++) {
            Thread t = new Thread(BlockConsumer) {
                Name = "BlockConcumer" + i
            };
            t.Start();
            blockingConsumerThreads.Add(t);
        }
        // Create threads that open long text streams.
        for (int i = 0; i < _scaleFactor; i++) {
            Thread t = new Thread(TextStreamer) {
                Name = "TextStreamer" + i
            };
            t.Start();
            textStreamerThreads.Add(t);
        }
        // Create threads that call the handler's Item method.
        // Use only a single thread if the handler has not
        // declared itself as thread safe.
        int handlerCallerThreadCount = _userHandler.ThreadSafe ? _scaleFactor : 1;
        for (int i = 0; i < handlerCallerThreadCount; i++) {
            Thread t = new Thread(HandlerCaller) {
                Name = "HandlerCaller" + i
            };
            t.Start();
            handerCallerThreads.Add(t);
        }
    } catch (Exception exception) {
        SendErrorAndCancel("Unexpected exception", exception);
    }
    // Wait for the threads reading blocks of documents to complete.
    foreach (Thread t in blockingConsumerThreads) {
        t.Join();
    }
    // Indicate that we will add no more documents to the standard
    // and stream queues.
    _standardQueue.CompleteAdding();
    _streamQueue.CompleteAdding();
    // Wait for the threads opening streams to complete.
    foreach (Thread t in textStreamerThreads) {
        t.Join();
    }
    // Indicate that no more documents with open streams will be
    // added to that queue.
    _openStreamQueue.CompleteAdding();
    // Wait for all the documents remaining in the standard and open
    // stream queues to be sent to the handler.
    foreach (Thread t in handerCallerThreads) {
        t.Join();
    }
    // Call the handler's After method; the argument reports whether the
    // run completed without cancellation.
    _userHandler.After(!_cancellationToken.IsCancellationRequested);
}