private static void ArgumentValidation(Action<MergePurgeParam> processData)
{
    if (processData == null)
    {
        throw new ArgumentNullException(SortHelpers.GetParameterName(new { processData }), "The processData action method must be defined.");
    }
}
public static void Validate(Action<string[], string, string[]> setKeys)
{
    if (setKeys == null)
    {
        throw new ArgumentNullException(SortHelpers.GetParameterName(new { setKeys }), "A setKeys function must be defined.");
    }
}
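The validation methods above pass an anonymous object such as new { setKeys } so the thrown exception can carry the caller's parameter name. The snippets never show SortHelpers.GetParameterName; a minimal sketch of how such a helper is typically written, assuming it simply reads the single property of the anonymous type via reflection:

using System.Linq;

public static class SortHelpers
{
    // Hypothetical sketch: the anonymous object passed in (e.g. new { setKeys })
    // has exactly one property, and that property's name matches the caller's variable name.
    public static string GetParameterName<T>(T parameter) where T : class
    {
        return typeof(T).GetProperties().Single().Name;
    }
}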
/// <summary>
/// Performs a down-heap or sink-down operation for a max-heap.
/// </summary>
/// <param name="values">The values.</param>
/// <param name="heapSize">The heap size.</param>
/// <param name="i">The index to start at when sinking down.</param>
/// <param name="progressCallback">The progress callback.</param>
private void Sink(int[] values, int heapSize, int i, Action<SortProgress> progressCallback)
{
    int largest = i;
    int left = (2 * i) + 1;
    int right = (2 * i) + 2;

    if (left < heapSize && values[left] > values[largest])
    {
        largest = left;
    }

    if (right < heapSize && values[right] > values[largest])
    {
        largest = right;
    }

    if (largest != i)
    {
        progressCallback?.Invoke(new SortProgress(new[] { largest }, values));
        SortHelpers.Swap(values, i, largest);
        progressCallback?.Invoke(new SortProgress(new[] { i }, values));
        this.Sink(values, heapSize, largest, progressCallback);
    }
}
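Sink relies on SortHelpers.Swap and is driven by a BuildHeap step (referenced by the heap sort further down); neither is shown in the snippets. A minimal sketch of both under the usual max-heap construction, with the names and placement treated as assumptions:

// Hypothetical SortHelpers.Swap: exchange two elements in place.
public static void Swap(int[] values, int first, int second)
{
    int temp = values[first];
    values[first] = values[second];
    values[second] = temp;
}

// Sketch of the BuildHeap step used by the heap sort below:
// sink every non-leaf node, starting from the last parent.
private void BuildHeap(int[] values, Action<SortProgress> progressCallback)
{
    for (var i = (values.Length / 2) - 1; i >= 0; i--)
    {
        this.Sink(values, values.Length, i, progressCallback);
    }
}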
/// <summary>
/// Writes an array of logging events to the log target. By default it iterates on all
/// events and passes them to "Write" method. Inheriting classes can use this method to
/// optimize batch writes.
/// </summary>
/// <param name="logEvents">Logging events to be written out.</param>
protected override Task WriteAsyncTask(IList<LogEventInfo> logEvents, CancellationToken cancellationToken)
{
    // Must sort into containers and then into the blobs for the container.
    if (_getContainerBlobNameDelegate == null)
        _getContainerBlobNameDelegate = logEvent => new ContainerBlobKey(RenderLogEvent(Container, logEvent), RenderLogEvent(BlobName, logEvent));

    if (logEvents.Count == 1)
    {
        return WriteToBlobAsync(logEvents, RenderLogEvent(Container, logEvents[0]), RenderLogEvent(BlobName, logEvents[0]), cancellationToken);
    }

    var partitionBuckets = SortHelpers.BucketSort(logEvents, _getContainerBlobNameDelegate);
    IList<Task> multipleTasks = partitionBuckets.Count > 1 ? new List<Task>(partitionBuckets.Count) : null;
    foreach (var partitionBucket in partitionBuckets)
    {
        try
        {
            var sendTask = WriteToBlobAsync(partitionBucket.Value, partitionBucket.Key.ContainerName, partitionBucket.Key.BlobName, cancellationToken);
            if (multipleTasks == null)
                return sendTask;
            else
                multipleTasks.Add(sendTask);
        }
        catch (Exception ex)
        {
            InternalLogger.Error(ex, "AzureBlobStorage(Name={0}): Failed to write {1} logevents to blob. ContainerName={2}, BlobName={3}", Name, partitionBucket.Value.Count, partitionBucket.Key.ContainerName, partitionBucket.Key.BlobName);
            if (multipleTasks == null)
                throw;
        }
    }

    return Task.WhenAll(multipleTasks ?? new Task[0]);
}
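Several of the NLog-style targets in this collection first group events with SortHelpers.BucketSort and then write each bucket. The helper itself is not shown; a minimal sketch of the grouping it presumably performs, keyed by the supplied delegate (the exact signature and return type are assumptions):

using System;
using System.Collections.Generic;

// Hypothetical sketch of SortHelpers.BucketSort: group the inputs by key,
// preserving insertion order within each bucket.
public static IDictionary<TKey, IList<TValue>> BucketSort<TKey, TValue>(
    IEnumerable<TValue> inputs, Func<TValue, TKey> keySelector)
{
    var buckets = new Dictionary<TKey, IList<TValue>>();
    foreach (var input in inputs)
    {
        var key = keySelector(input);
        if (!buckets.TryGetValue(key, out var bucket))
        {
            bucket = new List<TValue>();
            buckets[key] = bucket;
        }

        bucket.Add(input);
    }

    return buckets;
}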
private void SortDescriptionsCollectionChanged(object sender, NotifyCollectionChangedEventArgs e)
{
    // Clear all the headers of sort order.
    foreach (DataGridColumn column in Columns)
    {
        string sortPath = SortHelpers.GetSortMemberPath(column);
        if (sortPath != null)
        {
            column.Header = sortPath;
        }
    }

    // Add sort order.
    int sortIndex = 0;
    foreach (SortDescription sortDesc in Items.SortDescriptions)
    {
        foreach (DataGridColumn column in Columns)
        {
            if (sortDesc.PropertyName == SortHelpers.GetSortMemberPath(column))
            {
                var sb = new StringBuilder();
                sb.Append(sortDesc.PropertyName);
                if (Items.SortDescriptions.Count > 1 && ShowSortOrder)
                {
                    sb.Append(string.Format(" (Sort Order: {0})", sortIndex));
                    column.Header = sb.ToString();
                }
            }
        }

        sortIndex++;
    }
}
protected void ApplyOrder(Func<SortOrder, Func<IEnumerable<T>, IOrderedEnumerable<T>>> orderFunc, ref SortOrder sortOrder)
{
    this.SongOrderFunc = orderFunc(sortOrder);
    SortHelpers.InverseOrder(ref sortOrder);

    this.SelectableSongs = this.SongOrderFunc(this.SelectableSongs);
}
public void OrderYoutubeSongsByTitle()
{
    this.youtubeSongOrderFunc = SortHelpers.GetOrderByTitle<YoutubeSong>(this.currentYoutubeSongTitleOrder);
    SortHelpers.InverseOrder(ref this.currentYoutubeSongTitleOrder);

    this.OnPropertyChanged(vm => vm.SelectableYoutubeSongs);
}
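The view-model methods above and below pair an order-producing helper with SortHelpers.InverseOrder, which flips the stored SortOrder so the next call toggles the direction. Neither helper is shown; a minimal sketch, assuming a Song base type with a Title property:

// Hypothetical sketches of the two helpers used by the ordering methods.
public static Func<IEnumerable<T>, IOrderedEnumerable<T>> GetOrderByTitle<T>(SortOrder sortOrder) where T : Song
{
    return songs => sortOrder == SortOrder.Ascending
        ? songs.OrderBy(song => song.Title)
        : songs.OrderByDescending(song => song.Title);
}

// Toggle the stored order so the next invocation sorts the other way.
public static void InverseOrder(ref SortOrder sortOrder)
{
    sortOrder = sortOrder == SortOrder.Ascending ? SortOrder.Descending : SortOrder.Ascending;
}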
public static void Sort(int[] array)
{
    if (array.Length == 0)
    {
        return;
    }

    var lastIndex = array.Length;
    while (lastIndex != 1)
    {
        // Reset per pass so the early exit triggers on the first pass with no swaps.
        var swapExists = false;
        for (var j = 0; j < lastIndex - 1; ++j)
        {
            if (array[j] > array[j + 1])
            {
                SortHelpers.Swap(array, j + 1, j);
                swapExists = true;
            }
        }

        if (!swapExists)
        {
            break;
        }

        --lastIndex;
    }
}
private static void ArgumentValidation<T>(DelimitedFileSource<T> delimitedFileSource)
{
    if (string.IsNullOrWhiteSpace(delimitedFileSource.Delimiter))
    {
        throw new ArgumentNullException(SortHelpers.GetParameterName(new { delimitedFileSource.Delimiter }), "The delimiter can not be null or empty.");
    }
}
public void OrderLocalSongsByTitle()
{
    this.localSongOrderFunc = SortHelpers.GetOrderByTitle<Song>(this.currentLocalSongTitleOrder);
    SortHelpers.InverseOrder(ref this.currentLocalSongTitleOrder);

    this.OnPropertyChanged(vm => vm.SelectableLocalSongs);
}
public static void Validate(string delimiter)
{
    if (string.IsNullOrEmpty(delimiter))
    {
        throw new ArgumentNullException(SortHelpers.GetParameterName(new { delimiter }), "The delimiter can not be null or empty.");
    }
}
private void DataGridStandardSorting(object sender, DataGridSortingEventArgs e)
{
    string sortPropertyName = SortHelpers.GetSortMemberPath(e.Column);
    if (!string.IsNullOrEmpty(sortPropertyName))
    {
        // Sorting is cleared when the previous state is Descending.
        if (e.Column.SortDirection.HasValue && e.Column.SortDirection.Value == ListSortDirection.Descending)
        {
            int index = SortHelpers.FindSortDescription(Items.SortDescriptions, sortPropertyName);
            if (index != -1)
            {
                e.Column.SortDirection = null;

                // Remove the sort description.
                Items.SortDescriptions.RemoveAt(index);
                Items.Refresh();

                if ((Keyboard.Modifiers & ModifierKeys.Shift) != ModifierKeys.Shift)
                {
                    // Clear any other sort descriptions for the multisorting case.
                    Items.SortDescriptions.Clear();
                    Items.Refresh();
                }

                // Stop the default sort.
                e.Handled = true;
            }
        }
    }
}
protected override Task WriteAsyncTask(IList<LogEventInfo> logEvents, CancellationToken cancellationToken)
{
    if (_getEventHubPartitionKeyDelegate == null)
    {
        _getEventHubPartitionKeyDelegate = l => RenderLogEvent(PartitionKey, l);
    }

    if (logEvents.Count == 1)
    {
        var eventDataList = CreateEventDataList(logEvents, out var eventDataSize);
        return WriteSingleBatchAsync(eventDataList, _getEventHubPartitionKeyDelegate(logEvents[0]));
    }

    var partitionBuckets = SortHelpers.BucketSort(logEvents, _getEventHubPartitionKeyDelegate);
    IList<Task> multipleTasks = partitionBuckets.Count > 1 ? new List<Task>(partitionBuckets.Count) : null;
    foreach (var partitionBucket in partitionBuckets)
    {
        try
        {
            var eventDataList = CreateEventDataList(partitionBucket.Value, out var eventDataSize);

            Task sendTask = Task.CompletedTask;
            int batchSize = CalculateBatchSize(eventDataList, eventDataSize);
            if (eventDataList.Count <= batchSize)
            {
                sendTask = WriteSingleBatchAsync(eventDataList, partitionBucket.Key);
            }
            else
            {
                // Must chain the tasks together so they don't run concurrently.
                foreach (var batchItem in GenerateBatches(eventDataList, batchSize))
                {
                    string partitionKey = partitionBucket.Key;
                    sendTask = sendTask.ContinueWith(async p => await WriteSingleBatchAsync(batchItem, partitionKey).ConfigureAwait(false), cancellationToken);
                }
            }

            if (multipleTasks == null)
            {
                return sendTask;
            }
            else
            {
                multipleTasks.Add(sendTask);
            }
        }
        catch (Exception ex)
        {
            InternalLogger.Error(ex, "AzureEventHub(Name={0}): Failed to create EventData batch.", Name);
            if (multipleTasks == null)
            {
                throw;
            }
        }
    }

    return multipleTasks?.Count > 0 ? Task.WhenAll(multipleTasks) : Task.CompletedTask;
}
public static void Validate(string sourcefilePath, Action<string, string[]> setKeys, string destinationFolder)
{
    if (setKeys == null)
    {
        throw new ArgumentNullException(SortHelpers.GetParameterName(new { setKeys }), "A setKeys function must be defined.");
    }

    Validate(sourcefilePath, destinationFolder);
}
public static void Validate<T>(string sourcefilePath, Func<string, T> getKey, string destinationFolder, int maxBatchSize)
{
    if (getKey == null)
    {
        throw new ArgumentNullException(SortHelpers.GetParameterName(new { getKey }), "A GetKey function must be defined.");
    }

    Validate(sourcefilePath, destinationFolder);
    Validate(maxBatchSize);
}
/// <summary>
/// Writes an array of logging events to the log target. By default it iterates on all
/// events and passes them to "Write" method. Inheriting classes can use this method to
/// optimize batch writes.
/// </summary>
/// <param name="logEvents">Logging events to be written out.</param>
protected override Task WriteAsyncTask(IList<LogEventInfo> logEvents, CancellationToken cancellationToken)
{
    // Must sort into containers and then into the blobs for the container.
    if (_getContainerBlobNameDelegate == null)
    {
        _getContainerBlobNameDelegate = logEvent => new ContainerBlobKey(RenderLogEvent(Container, logEvent), RenderLogEvent(BlobName, logEvent));
    }

    if (logEvents.Count == 1)
    {
        var containerName = RenderLogEvent(Container, logEvents[0]);
        var blobName = RenderLogEvent(BlobName, logEvents[0]);

        try
        {
            var blobPayload = CreateBlobPayload(logEvents);
            return WriteToBlobAsync(blobPayload, containerName, blobName, cancellationToken);
        }
        catch (Exception ex)
        {
            InternalLogger.Error(ex, "AzureBlobStorage(Name={0}): Failed writing {1} logevents to BlobName={2} in ContainerName={3}", Name, 1, blobName, containerName);
            throw;
        }
    }

    var partitionBuckets = SortHelpers.BucketSort(logEvents, _getContainerBlobNameDelegate);
    IList<Task> multipleTasks = partitionBuckets.Count > 1 ? new List<Task>(partitionBuckets.Count) : null;
    foreach (var partitionBucket in partitionBuckets)
    {
        var containerName = partitionBucket.Key.ContainerName;
        var blobName = partitionBucket.Key.BlobName;
        var bucketSize = partitionBucket.Value.Count;

        try
        {
            var blobPayload = CreateBlobPayload(partitionBucket.Value);
            var sendTask = WriteToBlobAsync(blobPayload, containerName, blobName, cancellationToken);
            if (multipleTasks == null)
            {
                return sendTask;
            }

            multipleTasks.Add(sendTask);
        }
        catch (Exception ex)
        {
            InternalLogger.Error(ex, "AzureBlobStorage(Name={0}): Failed writing {1} logevents to BlobName={2} in ContainerName={3}", Name, bucketSize, blobName, containerName);
            if (multipleTasks == null)
            {
                throw;
            }
        }
    }

    return Task.WhenAll(multipleTasks ?? new Task[0]);
}
private static void ArgumentValidation<T>(FixedWidthFileSource<T> fixedWidthFileSource)
{
    if (fixedWidthFileSource.FixedWidths == null)
    {
        throw new ArgumentNullException(SortHelpers.GetParameterName(new { fixedWidthFileSource.FixedWidths }), "The file source fixed widths can not be null.");
    }

    if (fixedWidthFileSource.FixedWidths.Length == 0)
    {
        throw new ArgumentException("The file source fixed widths cannot be empty.", SortHelpers.GetParameterName(new { fixedWidthFileSource.FixedWidths }));
    }
}
public static void Validate(int maxBatchSize)
{
    // An int can never exceed int.MaxValue, so only the lower bound needs checking.
    if (maxBatchSize <= 0)
    {
        throw new ArgumentNullException(SortHelpers.GetParameterName(new { maxBatchSize }), "The maxBatchSize must be greater than zero.");
    }
}
private static void ArgumentValidation(IFileSource fileSource)
{
    if (string.IsNullOrWhiteSpace(fileSource.SourceFilePath))
    {
        throw new ArgumentNullException(SortHelpers.GetParameterName(new { fileSource }), "The sourceFilePath cannot be null or empty.");
    }

    if (!File.Exists(fileSource.SourceFilePath))
    {
        throw new FileNotFoundException(string.Format("The sourceFilePath, {0}, does not exist.", fileSource.SourceFilePath));
    }
}
/// <summary>
/// Bubble sorts input values and optionally reports the progress.
/// </summary>
/// <param name="values">The values to sort.</param>
/// <param name="progressCallback">The optional progress callback.</param>
public void Sort(int[] values, Action<SortProgress> progressCallback = null)
{
    for (var write = 0; write < values.Length; write++)
    {
        for (var sort = 0; sort < values.Length - 1; sort++)
        {
            if (values[sort] > values[sort + 1])
            {
                SortHelpers.Swap(values, sort + 1, sort);
                progressCallback?.Invoke(new SortProgress(new[] { sort + 1 }, values));
            }
        }
    }
}
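A small usage sketch for the progress-reporting sorters; the containing class name (BubbleSort here) is an assumption, since the snippet only shows the Sort method:

var values = new[] { 5, 1, 4, 2, 8 };
var swapsReported = 0;

// The callback fires once per reported step; here it just counts invocations.
new BubbleSort().Sort(values, progress => swapsReported++);

Console.WriteLine(string.Join(", ", values)); // 1, 2, 4, 5, 8
Console.WriteLine(swapsReported);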
public static void Validate(string sourcefilePath, string destinationFolder)
{
    if (string.IsNullOrWhiteSpace(sourcefilePath))
    {
        throw new ArgumentNullException(SortHelpers.GetParameterName(new { sourcefilePath }), "The sourceFilePath cannot be null or empty.");
    }

    if (!File.Exists(sourcefilePath))
    {
        throw new FileNotFoundException("The sourceFilePath, " + sourcefilePath + ", does not exist.");
    }

    if (destinationFolder != null && !Directory.Exists(destinationFolder))
    {
        throw new DirectoryNotFoundException("The destination folder, " + destinationFolder + ", does not exist.");
    }
}
/// <summary>
/// Heap sorts input values and optionally reports the progress.
/// </summary>
/// <param name="values">The values to sort.</param>
/// <param name="progressCallback">The optional progress callback.</param>
public void Sort(int[] values, Action<SortProgress> progressCallback = null)
{
    var heapSize = values.Length;
    this.BuildHeap(values, progressCallback);

    for (var i = heapSize - 1; i >= 1; i--)
    {
        progressCallback?.Invoke(new SortProgress(new[] { 0 }, values));
        SortHelpers.Swap(values, i, 0);
        progressCallback?.Invoke(new SortProgress(new[] { i }, values));

        heapSize--;
        this.Sink(values, heapSize, 0, progressCallback);
    }
}
/// <summary>
/// Writes an array of logging events to the log target. By default it iterates on all
/// events and passes them to "Write" method. Inheriting classes can use this method to
/// optimize batch writes.
/// </summary>
/// <param name="logEvents">Logging events to be written out.</param>
protected override void Write(AsyncLogEventInfo[] logEvents)
{
    var buckets = SortHelpers.BucketSort(logEvents, c => this.BuildConnectionString(c.LogEvent));

    try
    {
        foreach (var kvp in buckets)
        {
            foreach (AsyncLogEventInfo ev in kvp.Value)
            {
                try
                {
                    this.WriteEventToDatabase(ev.LogEvent);
                    ev.Continuation(null);
                }
                catch (Exception exception)
                {
                    // In case of exception, close the connection and report it.
                    InternalLogger.Error(exception, "Error when writing to database.");

                    if (exception.MustBeRethrownImmediately())
                    {
                        throw;
                    }

                    InternalLogger.Trace("DatabaseTarget: close connection because of exception");
                    this.CloseConnection();
                    ev.Continuation(exception);

                    if (exception.MustBeRethrown())
                    {
                        throw;
                    }
                }
            }
        }
    }
    finally
    {
        if (!this.KeepConnection)
        {
            InternalLogger.Trace("DatabaseTarget: close connection because of KeepConnection=false");
            this.CloseConnection();
        }
    }
}
private static int Partition(int[] arr, int start, int end)
{
    var pivot = arr[end];
    var smallerElIndex = start - 1;

    // Only scan the sub-range being partitioned, and advance the boundary
    // before swapping so smaller-than-pivot elements accumulate on the left.
    for (var i = start; i < end; ++i)
    {
        if (arr[i] < pivot)
        {
            ++smallerElIndex;
            SortHelpers.Swap(arr, i, smallerElIndex);
        }
    }

    ++smallerElIndex;
    SortHelpers.Swap(arr, smallerElIndex, end);
    return smallerElIndex;
}
public int CompareTo(ArtistViewModel other)
{
    if (this.IsAllArtists && other.IsAllArtists)
    {
        return 0;
    }

    if (this.IsAllArtists)
    {
        return -1;
    }

    if (other.IsAllArtists)
    {
        return 1;
    }

    return String.Compare(SortHelpers.RemoveArtistPrefixes(this.Name), SortHelpers.RemoveArtistPrefixes(other.Name), StringComparison.OrdinalIgnoreCase);
}
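SortHelpers.RemoveArtistPrefixes is not shown in the snippets. Presumably it strips a leading article so that, for example, "The Beatles" sorts under "B"; a minimal sketch under that assumption:

// Hypothetical sketch: drop a leading "A " or "The " so artists sort by their core name.
public static string RemoveArtistPrefixes(string artistName)
{
    string[] prefixes = { "A ", "The " };

    foreach (var prefix in prefixes)
    {
        if (artistName.StartsWith(prefix, StringComparison.OrdinalIgnoreCase))
        {
            return artistName.Substring(prefix.Length);
        }
    }

    return artistName;
}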
protected override Task WriteAsyncTask(IList<LogEventInfo> logEvents, CancellationToken cancellationToken)
{
    if (_getEventHubPartitionKeyDelegate == null)
    {
        _getEventHubPartitionKeyDelegate = l => RenderLogEvent(PartitionKey, l);
    }

    if (logEvents.Count == 1)
    {
        var eventDataBatch = CreateEventDataBatch(logEvents, out var eventDataSize);
        return WriteSingleBatchAsync(eventDataBatch, _getEventHubPartitionKeyDelegate(logEvents[0]));
    }

    var partitionBuckets = SortHelpers.BucketSort(logEvents, _getEventHubPartitionKeyDelegate);
    IList<Task> multipleTasks = partitionBuckets.Count > 1 ? new List<Task>(partitionBuckets.Count) : null;
    foreach (var partitionBucket in partitionBuckets)
    {
        try
        {
            var eventDataBatch = CreateEventDataBatch(partitionBucket.Value, out var eventDataSize);
            Task sendTask = WritePartitionBucketAsync(eventDataBatch, partitionBucket.Key, eventDataSize);
            if (multipleTasks == null)
            {
                return sendTask;
            }

            multipleTasks.Add(sendTask);
        }
        catch (Exception ex)
        {
            InternalLogger.Error(ex, "AzureEventHub(Name={0}): Failed to create EventData batch.", Name);
            if (multipleTasks == null)
            {
                throw;
            }
        }
    }

    return multipleTasks?.Count > 0 ? Task.WhenAll(multipleTasks) : Task.CompletedTask;
}
/// <summary>
/// Takes the last element as the pivot, places the pivot element at its correct position in the
/// sorted array, and places all smaller elements to the left of the pivot and all greater elements to its right.
/// </summary>
/// <param name="values">The values.</param>
/// <param name="start">The start index.</param>
/// <param name="end">The end index.</param>
/// <param name="progressCallback">The progress callback.</param>
/// <returns>A partitioning index.</returns>
private int Partition(int[] values, int start, int end, Action<SortProgress> progressCallback)
{
    var pivot = values[end];
    var i = start - 1;

    for (var j = start; j <= end - 1; j++)
    {
        if (values[j] <= pivot)
        {
            i++;
            progressCallback?.Invoke(new SortProgress(new[] { j, start, end }, values));
            SortHelpers.Swap(values, i, j);
            progressCallback?.Invoke(new SortProgress(new[] { i, start, end }, values));
        }
    }

    progressCallback?.Invoke(new SortProgress(new[] { start, end }, values));
    SortHelpers.Swap(values, i + 1, end);
    progressCallback?.Invoke(new SortProgress(new[] { i + 1, start, end }, values));

    return i + 1;
}
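Partition is only half of quicksort; the snippets do not include the recursive driver. A minimal sketch of how such a Partition method is typically driven, assuming it lives in the same class (the driver's name and signature are assumptions):

// Hypothetical recursive driver: partition, then sort each side of the pivot.
private void Sort(int[] values, int start, int end, Action<SortProgress> progressCallback)
{
    if (start < end)
    {
        var pivotIndex = this.Partition(values, start, end, progressCallback);
        this.Sort(values, start, pivotIndex - 1, progressCallback);
        this.Sort(values, pivotIndex + 1, end, progressCallback);
    }
}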
public static void Sort(int[] array)
{
    if (array.Length == 0)
    {
        return;
    }

    for (var i = 0; i < array.Length - 1; ++i)
    {
        var minIndex = i;
        for (var j = i + 1; j < array.Length; ++j)
        {
            if (array[j] < array[minIndex])
            {
                minIndex = j;
            }
        }

        if (i != minIndex)
        {
            SortHelpers.Swap(array, i, minIndex);
        }
    }
}
protected override Task WriteAsyncTask(IList<LogEventInfo> logEvents, CancellationToken cancellationToken)
{
    // Must sort into tables and then into the partitions for each table.
    if (_getTablePartitionNameDelegate == null)
    {
        _getTablePartitionNameDelegate = logEvent => new TablePartitionKey(RenderLogEvent(TableName, logEvent), RenderLogEvent(PartitionKey, logEvent));
    }

    if (logEvents.Count == 1)
    {
        var batchItem = GenerateBatch(logEvents, RenderLogEvent(PartitionKey, logEvents[0]));
        return WriteToTableAsync(RenderLogEvent(TableName, logEvents[0]), batchItem, cancellationToken);
    }

    const int BatchMaxSize = 100;

    var partitionBuckets = SortHelpers.BucketSort(logEvents, _getTablePartitionNameDelegate);
    IList<Task> multipleTasks = partitionBuckets.Count > 1 ? new List<Task>(partitionBuckets.Count) : null;
    foreach (var partitionBucket in partitionBuckets)
    {
        string tableName = partitionBucket.Key.TableName;

        try
        {
            if (partitionBucket.Value.Count <= BatchMaxSize)
            {
                var batchItem = GenerateBatch(partitionBucket.Value, partitionBucket.Key.PartitionId);
                var writeTask = WriteToTableAsync(partitionBucket.Key.TableName, batchItem, cancellationToken);
                if (multipleTasks == null)
                {
                    return writeTask;
                }

                multipleTasks.Add(writeTask);
            }
            else
            {
                // Must chain the tasks together so they don't run concurrently.
                var batchCollection = GenerateBatches(partitionBucket.Value, partitionBucket.Key.PartitionId, BatchMaxSize);
                Task writeTask = WriteMultipleBatchesAsync(batchCollection, tableName, cancellationToken);
                if (multipleTasks == null)
                {
                    return writeTask;
                }

                multipleTasks.Add(writeTask);
            }
        }
        catch (Exception ex)
        {
            InternalLogger.Error(ex, "AzureTableStorageTarget(Name={0}): Failed to write table={1}", Name, tableName);
            if (multipleTasks == null)
            {
                throw;
            }
        }
    }

    return Task.WhenAll(multipleTasks ?? new Task[0]);
}
public void Sort()
{
    SortHelpers.QuickSort(this.Items, (x, y) => x.Uri.AbsoluteUri.CompareTo(y.Uri.AbsoluteUri));
    this.OnNotifyCollectionChanged(new NotifyCollectionChangedEventArgs(NotifyCollectionChangedAction.Reset));
}