/// <summary>
/// Re-indexes all tracks already present in the database whose files are outdated
/// or explicitly flagged (<c>NeedsIndexing == 1</c>), persisting changes in one
/// batched transaction per partition.
/// </summary>
/// <returns>The number of tracks that were updated.</returns>
private async Task<long> UpdateTracksAsync()
{
    long numberUpdatedTracks = 0;

    var args = new IndexingStatusEventArgs()
    {
        IndexingAction = IndexingAction.UpdateTracks,
        ProgressPercent = 0
    };

    await Task.Run(() =>
    {
        try
        {
            using (var conn = this.sqliteConnectionFactory.GetConnection())
            {
                LogClient.Info("Starting updating tracks");

                List<Track> alltracks = conn.Table<Track>().Select((t) => t).ToList();

                long currentValue = 0;
                long totalValue = alltracks.Count;
                int batchSize = IndexerUtils.GetParallelBatchSize(alltracks.Count);

                Parallel.ForEach(
                    Partitioner.Create(0, alltracks.Count, batchSize),
                    new ParallelOptions
                    {
                        CancellationToken = cancellationService.CancellationToken,
                        MaxDegreeOfParallelism = Environment.ProcessorCount
                    },
                    range =>
                    {
                        // Collect the whole partition's changes first, so the DB write
                        // below is one batched transaction instead of one per track.
                        var tracksToUpdate = new List<Track>();

                        for (int i = range.Item1; i < range.Item2 && cancellationService.KeepRunning; ++i)
                        {
                            var dbTrack = alltracks[i];

                            try
                            {
                                if (IndexerUtils.IsTrackOutdated(dbTrack) || dbTrack.NeedsIndexing == 1)
                                {
                                    this.ProcessTrack(dbTrack);
                                    tracksToUpdate.Add(dbTrack);

                                    // Shared counter is incremented from parallel workers:
                                    // a plain "+= 1" would lose updates.
                                    Interlocked.Increment(ref numberUpdatedTracks);
                                }
                            }
                            catch (Exception ex)
                            {
                                LogClient.Error("There was a problem while updating Track with path='{0}'. Exception: {1}", dbTrack.Path, ex.Message);
                            }
                        }

                        // The SQLite connection is shared by all workers; serialize access.
                        lock (conn)
                        {
                            if (tracksToUpdate.Count > 0)
                            {
                                conn.BeginTransaction();
                                conn.UpdateAll(tracksToUpdate);
                                conn.Commit();
                            }

                            // Progress advances once per completed partition (not per item),
                            // so the percentage cannot overshoot 100.
                            currentValue += (range.Item2 - range.Item1);
                            int percent = IndexerUtils.CalculatePercent(currentValue, totalValue);
                            args.ProgressPercent = percent;
                        }
                    });

                LogClient.Info("Finished updating tracks");
            }
        }
        catch (Exception ex)
        {
            LogClient.Error("There was a problem while updating Tracks. Exception: {0}", ex.Message);
        }
    });

    return numberUpdatedTracks;
}
/// <summary>
/// Adds all newly discovered disk paths as tracks, reusing cached track metadata
/// where available, and links each track to its folder. Inserts are batched into
/// one transaction per partition.
/// </summary>
/// <returns>The number of tracks that were added.</returns>
private async Task<long> AddTracksAsync()
{
    // Nothing new on disk: skip the whole pipeline.
    if (newDiskPaths.Count == 0)
    {
        return 0;
    }

    long numberAddedTracks = 0;

    var args = new IndexingStatusEventArgs()
    {
        IndexingAction = IndexingAction.AddTracks,
        ProgressPercent = 0,
        ProgressCurrent = 0
    };

    this.IndexingStatusChanged(args);

    await Task.Run(() =>
    {
        try
        {
            long currentValue = 0;
            long totalValue = this.newDiskPaths.Count;
            int batchSize = IndexerUtils.GetParallelBatchSize(newDiskPaths.Count);

            LogClient.Info("Processing {0} paths in batches of {1}", newDiskPaths.Count, batchSize);

            using (var conn = this.sqliteConnectionFactory.GetConnection())
            {
                Parallel.ForEach(
                    Partitioner.Create(0, newDiskPaths.Count, batchSize),
                    new ParallelOptions
                    {
                        CancellationToken = cancellationService.CancellationToken,
                        MaxDegreeOfParallelism = Environment.ProcessorCount
                    },
                    range =>
                    {
                        LogClient.Info("Processing range {0}-{1}", range.Item1, range.Item2);

                        // Presize to the partition size to avoid list growth reallocations.
                        var newDiskTracks = new List<Track>(range.Item2 - range.Item1);
                        var newFolderTracks = new List<Tuple<FolderPathInfo, Track>>(range.Item2 - range.Item1);

                        for (int i = range.Item1; i < range.Item2 && cancellationService.KeepRunning; ++i)
                        {
                            var diskPath = newDiskPaths[i];
                            var safePath = diskPath.Path.ToSafePath();

                            try
                            {
                                // Cache hit means the track metadata was already extracted
                                // in a previous run; only new tracks are processed and counted.
                                var diskTrack = cache.GetTrack(safePath);

                                if (diskTrack == null)
                                {
                                    diskTrack = Track.CreateDefault(diskPath.Path);
                                    ProcessTrack(diskTrack);
                                    newDiskTracks.Add(diskTrack);
                                    Interlocked.Increment(ref numberAddedTracks);
                                }

                                // Every path gets a folder link, cached or not.
                                newFolderTracks.Add(Tuple.Create(diskPath, diskTrack));
                            }
                            catch (Exception ex)
                            {
                                LogClient.Error("There was a problem while adding Track with path='{0}'. Exception: {1}", diskPath, ex.Message);
                            }
                        }

                        LogClient.Info("Finished range {0}-{1}", range.Item1, range.Item2);

                        // The SQLite connection is shared by all workers; serialize access.
                        lock (conn)
                        {
                            conn.BeginTransaction();
                            conn.InsertAll(newDiskTracks);
                            conn.InsertAll(newFolderTracks.Select(nft => new FolderTrack(nft.Item1.FolderId, nft.Item2.TrackID)));
                            conn.Commit();

                            currentValue += newFolderTracks.Count;
                            int percent = IndexerUtils.CalculatePercent(currentValue, totalValue);
                            args.ProgressCurrent = Interlocked.Read(ref numberAddedTracks);
                            args.ProgressPercent = percent;
                            this.IndexingStatusChanged(args);
                        }
                    });
            }
        }
        catch (Exception ex)
        {
            LogClient.Error("There was a problem while adding Tracks. Exception: {0}", ex.Message);
        }
    });

    return numberAddedTracks;
}
/// <summary>
/// Removes tracks that no longer belong to any indexed folder, then removes
/// tracks whose underlying file no longer exists on disk.
/// </summary>
/// <returns>The total number of tracks that were removed.</returns>
private async Task<long> RemoveTracksAsync()
{
    long numberRemovedTracks = 0;

    var args = new IndexingStatusEventArgs()
    {
        IndexingAction = IndexingAction.RemoveTracks,
        ProgressPercent = 0
    };

    await Task.Run(() =>
    {
        try
        {
            using (var conn = this.sqliteConnectionFactory.GetConnection())
            {
                LogClient.Info("Begin removing tracks");

                // Delete all tracks with missing folders in a single set-based statement.
                {
                    int deletedTracks = conn.Execute("DELETE FROM Track WHERE TrackID NOT IN (SELECT TrackID FROM FolderTrack)");

                    if (deletedTracks > 0)
                    {
                        this.IndexingStatusChanged(args);
                        numberRemovedTracks += deletedTracks;
                    }
                }

                var existingTracks = conn.Query<QueryResultTrackIdAndPath>("SELECT TrackID, Path FROM Track").ToList();

                Parallel.ForEach(
                    Partitioner.Create(0, existingTracks.Count, IndexerUtils.GetParallelBatchSize(existingTracks.Count)),
                    new ParallelOptions
                    {
                        CancellationToken = cancellationService.CancellationToken,
                        MaxDegreeOfParallelism = Environment.ProcessorCount,
                    },
                    range =>
                    {
                        LogClient.Info("Checking range {0}-{1}", range.Item1, range.Item2);

                        for (int i = range.Item1; i < range.Item2 && cancellationService.KeepRunning; ++i)
                        {
                            var trackIdAndPath = existingTracks[i];

                            // File.Exists is the slow part; only the DB write is serialized.
                            if (!System.IO.File.Exists(trackIdAndPath.Path))
                            {
                                lock (conn)
                                {
                                    conn.Delete<Track>(trackIdAndPath.TrackId);
                                }

                                // Count these deletions too, so the returned total is accurate.
                                // Incremented from parallel workers, hence Interlocked.
                                Interlocked.Increment(ref numberRemovedTracks);
                            }
                        }
                    });

                LogClient.Info("Finished removing tracks");
            }
        }
        catch (Exception ex)
        {
            LogClient.Error("There was a problem while removing Tracks. Exception: {0}", ex.Message);
        }
    });

    return numberRemovedTracks;
}