Code Example #1
        /// <summary>
        /// Output duplicate item diff
        /// </summary>
        /// <param name="datFile">Current DatFile object to use for updating</param>
        /// <param name="inputs">List of inputs to write out from</param>
        public static DatFile DiffDuplicates(DatFile datFile, List <ParentablePath> inputs)
        {
            InternalStopwatch watch = new InternalStopwatch("Initializing duplicate DAT");

            // Fill in any information not in the base DAT
            if (string.IsNullOrWhiteSpace(datFile.Header.FileName))
            {
                datFile.Header.FileName = "All DATs";
            }

            if (string.IsNullOrWhiteSpace(datFile.Header.Name))
            {
                datFile.Header.Name = "All DATs";
            }

            if (string.IsNullOrWhiteSpace(datFile.Header.Description))
            {
                datFile.Header.Description = "All DATs";
            }

            string  post     = " (Duplicates)";
            DatFile dupeData = DatFile.Create(datFile.Header);

            dupeData.Header.FileName    += post;
            dupeData.Header.Name        += post;
            dupeData.Header.Description += post;
            dupeData.Items = new ItemDictionary();

            watch.Stop();

            // Now, loop through the dictionary and populate the correct DATs
            watch.Start("Populating duplicate DAT");

            Parallel.ForEach(datFile.Items.Keys, Globals.ParallelOptions, key =>
            {
                ConcurrentList<DatItem> items = DatItem.Merge(datFile.Items[key]);

                // If the rom list is empty or null, just skip it
                if (items == null || items.Count == 0)
                {
                    return;
                }

                // Loop through and add the items correctly
                foreach (DatItem item in items)
                {
                    if (item.DupeType.HasFlag(DupeType.External))
                    {
                        DatItem newrom       = item.Clone() as DatItem;
                        newrom.Machine.Name += $" ({Path.GetFileNameWithoutExtension(inputs[item.Source.Index].CurrentPath)})";

                        dupeData.Items.Add(key, newrom);
                    }
                }
            });

            watch.Stop();

            return dupeData;
        }
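
Usage note: a minimal sketch of driving DiffDuplicates end to end, assuming the SabreTools DatFile, ParentablePath, Parser, and Writer types behave as shown in the other examples on this page; the input paths and the ParentablePath constructor call are hypothetical.

        // Sketch: parse two DATs into one combined DatFile, then extract external duplicates
        var inputs = new List<ParentablePath>
        {
            new ParentablePath(@"C:\dats\first.dat"),   // hypothetical input paths
            new ParentablePath(@"C:\dats\second.dat"),
        };

        DatFile combined = DatFile.Create();
        for (int i = 0; i < inputs.Count; i++)
            Parser.ParseInto(combined, inputs[i], indexId: i, keep: true);

        DatFile dupes = DiffDuplicates(combined, inputs);
        Writer.Write(dupes, @"C:\dats\out");            // writes the " (Duplicates)" DAT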
Code Example #2
File: Remover.cs Project: SabreTools/SabreTools
        /// <summary>
        /// Remove fields from a DatFile
        /// </summary>
        /// <param name="datFile">Current DatFile object to run operations on</param>
        public void ApplyRemovals(DatFile datFile)
        {
            // If the removers don't exist, we can't use them
            if (DatHeaderRemover == null && DatItemRemover == null)
            {
                return;
            }

            InternalStopwatch watch = new InternalStopwatch("Applying removals to DAT");

            // Remove DatHeader fields
            if (DatHeaderRemover != null && DatHeaderRemover.DatHeaderFields.Any())
            {
                DatHeaderRemover.RemoveFields(datFile.Header);
            }

            // Remove DatItem and Machine fields
            if (DatItemRemover != null && (DatItemRemover.MachineFields.Any() || DatItemRemover.DatItemFields.Any()))
            {
                Parallel.ForEach(datFile.Items.Keys, Globals.ParallelOptions, key =>
                {
                    ConcurrentList<DatItem> items = datFile.Items[key];
                    for (int j = 0; j < items.Count; j++)
                    {
                        DatItemRemover.RemoveFields(items[j]);
                    }

                    datFile.Items.Remove(key);
                    datFile.Items.AddRange(key, items);
                });
            }

            watch.Stop();
        }
Code Example #3
        /// <summary>
        /// Split a DAT by type of DatItem
        /// </summary>
        /// <param name="datFile">Current DatFile object to split</param>
        /// <returns>Dictionary of ItemType to DatFile mappings</returns>
        public static Dictionary<ItemType, DatFile> SplitByType(DatFile datFile)
        {
            // Create each of the respective output DATs
            InternalStopwatch watch = new InternalStopwatch("Splitting DAT by item type");

            // Create the set of type-to-dat mappings
            Dictionary<ItemType, DatFile> typeDats = new Dictionary<ItemType, DatFile>();

            // We only care about a subset of types
            List<ItemType> outputTypes = new List<ItemType>
            {
                ItemType.Disk,
                ItemType.Media,
                ItemType.Rom,
                ItemType.Sample,
            };

            // Setup all of the DatFiles
            foreach (ItemType itemType in outputTypes)
            {
                typeDats[itemType] = DatFile.Create(datFile.Header.CloneStandard());
                typeDats[itemType].Header.FileName    += $" ({itemType})";
                typeDats[itemType].Header.Name        += $" ({itemType})";
                typeDats[itemType].Header.Description += $" ({itemType})";
            }

            // Now populate each of the DAT objects in turn
            Parallel.ForEach(outputTypes, Globals.ParallelOptions, itemType =>
            {
                FillWithItemType(datFile, typeDats[itemType], itemType);
            });

            watch.Stop();
            return typeDats;
        }
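
Usage note: a short sketch of consuming the result, assuming a datFile that has already been parsed and the Writer.Write helper seen in code example #23; the output directory is hypothetical.

        // Sketch: split by item type and write one DAT per type
        Dictionary<ItemType, DatFile> typeDats = SplitByType(datFile);
        foreach (KeyValuePair<ItemType, DatFile> kvp in typeDats)
        {
            // Each value carries the " (Disk)" / " (Media)" / " (Rom)" / " (Sample)" header suffix
            Writer.Write(kvp.Value, @"C:\dats\by-type");
        }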
Code Example #4
        /// <summary>
        /// Populate from multiple paths while returning the individual headers
        /// </summary>
        /// <param name="datFile">Current DatFile object to use for updating</param>
        /// <param name="inputs">Paths to DATs to parse</param>
        /// <returns>List of DatHeader objects representing headers</returns>
        public static List<DatHeader> PopulateUserData(DatFile datFile, List<ParentablePath> inputs)
        {
            DatFile[]         datFiles = new DatFile[inputs.Count];
            InternalStopwatch watch    = new InternalStopwatch("Processing individual DATs");

            // Parse all of the DATs into their own DatFiles in the array
            Parallel.For(0, inputs.Count, Globals.ParallelOptions, i =>
            {
                var input = inputs[i];
                logger.User($"Adding DAT: {input.CurrentPath}");
                datFiles[i] = DatFile.Create(datFile.Header.CloneFiltering());
                Parser.ParseInto(datFiles[i], input, i, keep: true);
            });

            watch.Stop();

            watch.Start("Populating internal DAT");
            for (int i = 0; i < inputs.Count; i++)
            {
                AddFromExisting(datFile, datFiles[i], true);
            }

            watch.Stop();

            return datFiles.Select(d => d.Header).ToList();
        }
Code Example #5
        /// <summary>
        /// Output cascading diffs
        /// </summary>
        /// <param name="datFile">Current DatFile object to use for updating</param>
        /// <param name="datHeaders">Dat headers used optionally</param>
        /// <returns>List of DatFiles representing the individually indexed items</returns>
        public static List<DatFile> DiffCascade(DatFile datFile, List<DatHeader> datHeaders)
        {
            // Create a list of DatData objects representing output files
            List<DatFile> outDats = new List<DatFile>();

            // Ensure the current DatFile is sorted optimally
            datFile.Items.BucketBy(ItemKey.CRC, DedupeType.None);

            // Loop through each of the inputs and get or create a new DatData object
            InternalStopwatch watch = new InternalStopwatch("Initializing and filling all output DATs");

            // Create the DatFiles from the set of headers
            DatFile[] outDatsArray = new DatFile[datHeaders.Count];
            Parallel.For(0, datHeaders.Count, Globals.ParallelOptions, j =>
            {
                DatFile diffData = DatFile.Create(datHeaders[j]);
                diffData.Items   = new ItemDictionary();
                FillWithSourceIndex(datFile, diffData, j);
                outDatsArray[j] = diffData;
            });

            outDats = outDatsArray.ToList();
            watch.Stop();

            return outDats;
        }
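
Usage note: PopulateUserData (code example #4) produces the headers that DiffCascade consumes, so a hedged sketch of the pair looks like the following; datFile and inputs are assumed to be set up as in example #4, and the output directory is hypothetical.

        // Sketch: parse all inputs into datFile, then emit one cascaded diff per input
        List<DatHeader> headers = PopulateUserData(datFile, inputs);
        List<DatFile> diffs = DiffCascade(datFile, headers);
        foreach (DatFile diff in diffs)
        {
            Writer.Write(diff, @"C:\dats\cascade");
        }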
Code Example #6
File: Statistics.cs Project: SabreTools/SabreTools
        /// <summary>
        /// Output the stats for a list of input DATs as files in a human-readable format
        /// </summary>
        /// <param name="stats">List of pre-calculated statistics objects</param>
        /// <param name="reportName">Name of the output file</param>
        /// <param name="baddumpCol">True if baddumps should be included in output, false otherwise</param>
        /// <param name="nodumpCol">True if nodumps should be included in output, false otherwise</param>
        /// <param name="statDatFormat"> Set the statistics output format to use</param>
        /// <param name="throwOnError">True if the error that is thrown should be thrown back to the caller, false otherwise</param>
        /// <returns>True if the report was written correctly, false otherwise</returns>
        public static bool Write(
            List<DatStatistics> stats,
            string reportName,
            string outDir,
            bool baddumpCol,
            bool nodumpCol,
            StatReportFormat statDatFormat,
            bool throwOnError = false)
        {
            // If there's no output format, set the default
            if (statDatFormat == StatReportFormat.None)
            {
                logger.Verbose("No report format defined, defaulting to textfile");
                statDatFormat = StatReportFormat.Textfile;
            }

            // Get the proper output file name
            if (string.IsNullOrWhiteSpace(reportName))
            {
                reportName = "report";
            }

            // Get the proper output directory name
            outDir = outDir.Ensure();

            InternalStopwatch watch = new InternalStopwatch($"Writing out report data to '{outDir}'");

            // Get the dictionary of desired output report names
            Dictionary<StatReportFormat, string> outfiles = CreateOutStatsNames(outDir, statDatFormat, reportName);

            try
            {
                // Write out all required formats
                Parallel.ForEach(outfiles.Keys, Globals.ParallelOptions, reportFormat =>
                {
                    string outfile = outfiles[reportFormat];
                    try
                    {
                        BaseReport.Create(reportFormat, stats)?.WriteToFile(outfile, baddumpCol, nodumpCol, throwOnError);
                    }
                    catch (Exception ex) when (!throwOnError)
                    {
                        logger.Error(ex, $"Report '{outfile}' could not be written out");
                    }
                });
            }
            catch (Exception ex) when (!throwOnError)
            {
                logger.Error(ex);
                return false;
            }
            finally
            {
                watch.Stop();
            }

            return true;
        }
Code Example #7
File: DatFromDir.cs Project: SabreTools/SabreTools
        /// <summary>
        /// Create a new Dat from a directory
        /// </summary>
        /// <param name="datFile">Current DatFile object to add to</param>
        /// <param name="basePath">Base folder to be used in creating the DAT</param>
        /// <param name="asFiles">TreatAsFiles representing CHD and Archive scanning</param>
        /// <param name="skipFileType">Type of files that should be skipped</param>
        /// <param name="addBlanks">True if blank items should be created for empty folders, false otherwise</param>
        /// <param name="hashes">Hashes to include in the information</param>
        public static bool PopulateFromDir(
            DatFile datFile,
            string basePath,
            TreatAsFile asFiles       = 0x00,
            SkipFileType skipFileType = SkipFileType.None,
            bool addBlanks            = false,
            Hash hashes = Hash.Standard)
        {
            // Set the progress variables
            long totalSize   = 0;
            long currentSize = 0;

            InternalStopwatch watch = new InternalStopwatch($"Populating DAT from {basePath}");

            // Process the input
            if (Directory.Exists(basePath))
            {
                logger.Verbose($"Folder found: {basePath}");

                // Get a list of all files to process
                List<string> files = Directory.EnumerateFiles(basePath, "*", SearchOption.AllDirectories).ToList();

                // Loop through and add the file sizes
                Parallel.ForEach(files, Globals.ParallelOptions, item =>
                {
                    Interlocked.Add(ref totalSize, new FileInfo(item).Length);
                });

                // Process the files in the main folder or any subfolder
                logger.User(totalSize, currentSize);
                foreach (string item in files)
                {
                    CheckFileForHashes(datFile, item, basePath, asFiles, skipFileType, addBlanks, hashes);
                    currentSize += new FileInfo(item).Length;
                    logger.User(totalSize, currentSize, item);
                }

                // Now find all folders that are empty, if we are supposed to
                if (addBlanks)
                {
                    ProcessDirectoryBlanks(datFile, basePath);
                }
            }
            else if (File.Exists(basePath))
            {
                logger.Verbose($"File found: {basePath}");

                totalSize = new FileInfo(basePath).Length;
                logger.User(totalSize, currentSize);

                string parentPath = Path.GetDirectoryName(Path.GetDirectoryName(basePath));
                CheckFileForHashes(datFile, basePath, parentPath, asFiles, skipFileType, addBlanks, hashes);
                logger.User(totalSize, totalSize, basePath);
            }

            watch.Stop();
            return true;
        }
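
Usage note: a minimal sketch of building a DAT from a folder scan, assuming the enum values shown in the signature above; the scan root and output directory are hypothetical.

        // Sketch: scan a folder with standard hashes, creating blanks for empty folders
        DatFile fromDir = DatFile.Create();
        bool ok = PopulateFromDir(
            fromDir,
            @"C:\roms",
            skipFileType: SkipFileType.None,
            addBlanks: true,
            hashes: Hash.Standard);

        if (ok)
            Writer.Write(fromDir, @"C:\dats\out");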
Code Example #8
        /// <summary>
        /// Split a DAT by input extensions
        /// </summary>
        /// <param name="datFile">Current DatFile object to split</param>
        /// <param name="extA">List of extensions to split on (first DAT)</param>
        /// <param name="extB">List of extensions to split on (second DAT)</param>
        /// <returns>Extension Set A and Extension Set B DatFiles</returns>
        public static (DatFile extADat, DatFile extBDat) SplitByExtension(DatFile datFile, List<string> extA, List<string> extB)
        {
            // If there are no items, return null DATs
            if (datFile.Items.TotalCount == 0)
            {
                return (null, null);
            }

            InternalStopwatch watch = new InternalStopwatch("Splitting DAT by extension");

            // Make sure all of the extensions don't have a dot at the beginning
            var    newExtA       = extA.Select(s => s.TrimStart('.').ToLowerInvariant());
            string newExtAString = string.Join(",", newExtA);

            var    newExtB       = extB.Select(s => s.TrimStart('.').ToLowerInvariant());
            string newExtBString = string.Join(",", newExtB);

            // Set all of the appropriate outputs for each of the subsets
            DatFile extADat = DatFile.Create(datFile.Header.CloneStandard());

            extADat.Header.FileName    += $" ({newExtAString})";
            extADat.Header.Name        += $" ({newExtAString})";
            extADat.Header.Description += $" ({newExtAString})";

            DatFile extBDat = DatFile.Create(datFile.Header.CloneStandard());

            extBDat.Header.FileName    += $" ({newExtBString})";
            extBDat.Header.Name        += $" ({newExtBString})";
            extBDat.Header.Description += $" ({newExtBString})";

            // Now separate the roms accordingly
            Parallel.ForEach(datFile.Items.Keys, Globals.ParallelOptions, key =>
            {
                ConcurrentList<DatItem> items = datFile.Items[key];
                foreach (DatItem item in items)
                {
                    if (newExtA.Contains((item.GetName() ?? string.Empty).GetNormalizedExtension()))
                    {
                        extADat.Items.Add(key, item);
                    }
                    else if (newExtB.Contains((item.GetName() ?? string.Empty).GetNormalizedExtension()))
                    {
                        extBDat.Items.Add(key, item);
                    }
                    else
                    {
                        extADat.Items.Add(key, item);
                        extBDat.Items.Add(key, item);
                    }
                }
            });

            // Then return both DatFiles
            watch.Stop();
            return (extADat, extBDat);
        }
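
Usage note: a hedged sketch of splitting on two extension sets; note from the loop above that an item matching neither set is added to both outputs. The extension lists and output directory are hypothetical.

        // Sketch: Set A gets bin/rom, Set B gets cue/iso, everything else goes to both
        var extA = new List<string> { "bin", "rom" };
        var extB = new List<string> { "cue", "iso" };
        (DatFile extADat, DatFile extBDat) = SplitByExtension(datFile, extA, extB);
        Writer.Write(extADat, @"C:\dats\out");
        Writer.Write(extBDat, @"C:\dats\out");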
Code Example #9
        /// <inheritdoc/>
        public override bool WriteToFile(string outfile, bool baddumpCol, bool nodumpCol, bool throwOnError = false)
        {
            InternalStopwatch watch = new InternalStopwatch($"Writing statistics to '{outfile}'");

            try
            {
                // Try to create the output file
                Stream fs = _writeToConsole ? Console.OpenStandardOutput() : File.Create(outfile);
                if (fs == null)
                {
                    logger.Warning($"File '{outfile}' could not be created for writing! Please check to see if the file is writable");
                    return false;
                }

                StreamWriter sw = new StreamWriter(fs);

                // Now process each of the statistics
                for (int i = 0; i < Statistics.Count; i++)
                {
                    // Get the current statistic
                    DatStatistics stat = Statistics[i];

                    // If we have a directory statistic
                    if (stat.IsDirectory)
                    {
                        WriteIndividual(sw, stat, baddumpCol, nodumpCol);

                        // If we have anything but the last value, write the separator
                        if (i < Statistics.Count - 1)
                        {
                            WriteFooterSeparator(sw);
                        }
                    }

                    // If we have a normal statistic
                    else
                    {
                        WriteIndividual(sw, stat, baddumpCol, nodumpCol);
                    }
                }

                sw.Dispose();
                fs.Dispose();
            }
            catch (Exception ex) when (!throwOnError)
            {
                logger.Error(ex);
                return false;
            }
            finally
            {
                watch.Stop();
            }

            return true;
        }
Code Example #10
        /// <summary>
        /// Split a DAT by size of Rom
        /// </summary>
        /// <param name="datFile">Current DatFile object to split</param>
        /// <param name="radix">Long value representing the split point</param>
        /// <returns>Less Than and Greater Than DatFiles</returns>
        public static (DatFile lessThan, DatFile greaterThan) SplitBySize(DatFile datFile, long radix)
        {
            // Create each of the respective output DATs
            InternalStopwatch watch = new InternalStopwatch("Splitting DAT by size");

            DatFile lessThan = DatFile.Create(datFile.Header.CloneStandard());

            lessThan.Header.FileName    += $" (less than {radix})";
            lessThan.Header.Name        += $" (less than {radix})";
            lessThan.Header.Description += $" (less than {radix})";

            DatFile greaterThan = DatFile.Create(datFile.Header.CloneStandard());

            greaterThan.Header.FileName    += $" (equal-greater than {radix})";
            greaterThan.Header.Name        += $" (equal-greater than {radix})";
            greaterThan.Header.Description += $" (equal-greater than {radix})";

            // Now populate each of the DAT objects in turn
            Parallel.ForEach(datFile.Items.Keys, Globals.ParallelOptions, key =>
            {
                ConcurrentList<DatItem> items = datFile.Items[key];
                foreach (DatItem item in items)
                {
                    // If the file is not a Rom, it automatically goes in the "lesser" dat
                    if (item.ItemType != ItemType.Rom)
                    {
                        lessThan.Items.Add(key, item);
                    }

                    // If the file is a Rom and has no size, put it in the "lesser" dat
                    else if (item.ItemType == ItemType.Rom && (item as Rom).Size == null)
                    {
                        lessThan.Items.Add(key, item);
                    }

                    // If the file is a Rom and less than the radix, put it in the "lesser" dat
                    else if (item.ItemType == ItemType.Rom && (item as Rom).Size < radix)
                    {
                        lessThan.Items.Add(key, item);
                    }

                    // If the file is a Rom and greater than or equal to the radix, put it in the "greater" dat
                    else if (item.ItemType == ItemType.Rom && (item as Rom).Size >= radix)
                    {
                        greaterThan.Items.Add(key, item);
                    }
                }
            });

            // Then return both DatFiles
            watch.Stop();
            return (lessThan, greaterThan);
        }
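
Usage note: a brief sketch with a 1 MiB radix; per the branches above, non-Rom items and Roms with no size always land in the "lesser" DAT. The output directory is hypothetical.

        // Sketch: split at 1 MiB and write both halves
        (DatFile lessThan, DatFile greaterThan) = SplitBySize(datFile, 1024 * 1024);
        Writer.Write(lessThan, @"C:\dats\out");
        Writer.Write(greaterThan, @"C:\dats\out");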
Code Example #11
File: Splitter.cs Project: SabreTools/SabreTools
        // TODO: Should any of these create a new DatFile in the process?
        // The reason this comes up is that doing any of the splits or merges
        // is an inherently destructive process. Making it output a new DatFile
        // might make it easier to deal with multiple internal steps. On the other
        // hand, this will increase memory usage significantly and would force the
        // existing paths to behave entirely differently
        #region Running

        /// <summary>
        /// Apply splitting on the DatFile
        /// </summary>
        /// <param name="datFile">Current DatFile object to run operations on</param>
        /// <param name="useTags">True if DatFile tags override splitting, false otherwise</param>
        /// <param name="throwOnError">True if the error that is thrown should be thrown back to the caller, false otherwise</param>
        /// <returns>True if the DatFile was split, false on error</returns>
        public bool ApplySplitting(DatFile datFile, bool useTags, bool throwOnError = false)
        {
            InternalStopwatch watch = new InternalStopwatch("Applying splitting to DAT");

            try
            {
                // If we are using tags from the DAT, set the proper input for split type unless overridden
                if (useTags && SplitType == MergingFlag.None)
                {
                    SplitType = datFile.Header.ForceMerging;
                }

                // Run internal splitting
                switch (SplitType)
                {
                    case MergingFlag.None:
                        // No-op
                        break;

                    case MergingFlag.Device:
                        CreateDeviceNonMergedSets(datFile);
                        break;

                    case MergingFlag.Full:
                        CreateFullyNonMergedSets(datFile);
                        break;

                    case MergingFlag.NonMerged:
                        CreateNonMergedSets(datFile);
                        break;

                    case MergingFlag.Merged:
                        CreateMergedSets(datFile);
                        break;

                    case MergingFlag.Split:
                        CreateSplitSets(datFile);
                        break;
                }
            }
            catch (Exception ex) when (!throwOnError)
            {
                logger.Error(ex);
                return false;
            }
            finally
            {
                watch.Stop();
            }

            return true;
        }
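
Usage note: a hedged sketch of forcing merged sets on a parsed DAT; it assumes Splitter has a parameterless constructor and a settable SplitType property, which the method body above implies but this page does not show.

        // Sketch: ignore the DAT's own forcemerging tag and build merged sets
        var splitter = new Splitter { SplitType = MergingFlag.Merged };  // assumed constructor/property
        bool ok = splitter.ApplySplitting(datFile, useTags: false);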
Code Example #12
File: Filter.cs Project: SabreTools/SabreTools
        /// <summary>
        /// Populate the filters objects using a set of key:value filters
        /// </summary>
        /// <param name="filters">List of key:value where ~key/!key is negated</param>
        public void PopulateFiltersFromList(List<string> filters)
        {
            // Instantiate the filters, if necessary
            MachineFilter ??= new MachineFilter();
            DatItemFilter ??= new DatItemFilter();

            // If the list is null or empty, just return
            if (filters == null || filters.Count == 0)
            {
                return;
            }

            InternalStopwatch watch = new InternalStopwatch("Populating filters from list");

            foreach (string filterPair in filters)
            {
                (string field, string value, bool negate) = ProcessFilterPair(filterPair);

                // If we don't even have a possible filter pair
                if (field == null && value == null)
                {
                    continue;
                }

                // Machine fields
                MachineField machineField = field.AsMachineField();
                if (machineField != MachineField.NULL)
                {
                    MachineFilter.SetFilter(machineField, value, negate);
                    MachineFilter.HasFilters = true;
                    continue;
                }

                // DatItem fields
                DatItemField datItemField = field.AsDatItemField();
                if (datItemField != DatItemField.NULL)
                {
                    DatItemFilter.SetFilter(datItemField, value, negate);
                    DatItemFilter.HasFilters = true;
                    continue;
                }

                // If we didn't match anything, log an error
                logger.Warning($"The value {field} did not match any filterable field names. Please check the wiki for more details on supported field names.");
            }

            watch.Stop();
        }
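
Usage note: a sketch of feeding key:value pairs to the filter, assuming a parameterless Filter constructor; the concrete field names are hypothetical and would need to match the project's documented filterable fields.

        // Sketch: one positive filter plus one negated filter (leading '!')
        var filter = new Filter();
        filter.PopulateFiltersFromList(new List<string>
        {
            "machine.name:pacman",      // hypothetical field name
            "!datitem.status:nodump",   // negation via '!' prefix
        });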
Code Example #13
        /// <summary>
        /// Parse a DAT and return all found games and roms within
        /// </summary>
        /// <param name="datFile">Current DatFile object to add to</param>
        /// <param name="input">Name of the file to be parsed</param>
        /// <param name="indexId">Index ID for the DAT</param>
        /// <param name="keep">True if full pathnames are to be kept, false otherwise (default)</param>
        /// <param name="keepext">True if original extension should be kept, false otherwise (default)</param>
        /// <param name="quotes">True if quotes are assumed in supported types (default), false otherwise</param>
        /// <param name="statsOnly">True to only add item statistics while parsing, false otherwise</param>
        /// <param name="throwOnError">True if the error that is thrown should be thrown back to the caller, false otherwise</param>
        public static void ParseInto(
            DatFile datFile,
            ParentablePath input,
            int indexId       = 0,
            bool keep         = false,
            bool keepext      = false,
            bool quotes       = true,
            bool statsOnly    = false,
            bool throwOnError = true)
        {
            // Get the current path from the filename
            string currentPath = input.CurrentPath;

            // Check the file extension first as a safeguard
            if (!Utilities.HasValidDatExtension(currentPath))
            {
                return;
            }

            // If the output filename isn't set already, get the internal filename
            datFile.Header.FileName = string.IsNullOrWhiteSpace(datFile.Header.FileName)
                ? (keepext
                    ? Path.GetFileName(currentPath)
                    : Path.GetFileNameWithoutExtension(currentPath))
                : datFile.Header.FileName;

            // If the output type isn't set already, get the internal output type
            DatFormat currentPathFormat = GetDatFormat(currentPath);

            datFile.Header.DatFormat = datFile.Header.DatFormat == 0 ? currentPathFormat : datFile.Header.DatFormat;
            datFile.Items.SetBucketedBy(ItemKey.CRC); // Setting this because it can reduce issues later

            InternalStopwatch watch = new InternalStopwatch($"Parsing '{currentPath}' into internal DAT");

            // Now parse the correct type of DAT
            try
            {
                var parsingDatFile = DatFile.Create(currentPathFormat, datFile, quotes);
                parsingDatFile?.ParseFile(currentPath, indexId, keep, statsOnly: statsOnly, throwOnError: throwOnError);
            }
            catch (Exception ex) when (!throwOnError)
            {
                logger.Error(ex, $"Error with file '{currentPath}'");
            }

            watch.Stop();
        }
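
Usage note: a minimal sketch of parsing one file, assuming the hypothetical ParentablePath constructor used earlier on this page.

        // Sketch: parse a single DAT into a fresh DatFile, keeping full pathnames
        DatFile parsed = DatFile.Create();
        ParseInto(parsed, new ParentablePath(@"C:\dats\example.xml"), indexId: 0, keep: true);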
Code Example #14
File: Verification.cs Project: SabreTools/SabreTools
        /// <summary>
        /// Verify a DatFile against a set of inputs, leaving only missing files
        /// </summary>
        /// <param name="datFile">Current DatFile object to verify against</param>
        /// <param name="hashOnly">True if only hashes should be checked, false for full file information</param>
        /// <returns>True if verification was a success, false otherwise</returns>
        public static bool VerifyGeneric(DatFile datFile, bool hashOnly)
        {
            bool success = true;

            InternalStopwatch watch = new InternalStopwatch("Verifying all from supplied paths");

            // Force bucketing according to the flags
            datFile.Items.SetBucketedBy(ItemKey.NULL);
            if (hashOnly)
            {
                datFile.Items.BucketBy(ItemKey.CRC, DedupeType.Full);
            }
            else
            {
                datFile.Items.BucketBy(ItemKey.Machine, DedupeType.Full);
            }

            // Then mark items for removal
            var keys = datFile.Items.SortedKeys.ToList();

            foreach (string key in keys)
            {
                ConcurrentList<DatItem> items = datFile.Items[key];
                for (int i = 0; i < items.Count; i++)
                {
                    // Unmatched items will have a source ID of int.MaxValue, remove all others
                    if (items[i].Source.Index != int.MaxValue)
                    {
                        items[i].Remove = true;
                    }
                }

                // Set the list back, just in case
                datFile.Items[key] = items;
            }

            watch.Stop();

            // Set fixdat headers in case of writing out
            datFile.Header.FileName    = $"fixDAT_{datFile.Header.FileName}";
            datFile.Header.Name        = $"fixDAT_{datFile.Header.Name}";
            datFile.Header.Description = $"fixDAT_{datFile.Header.Description}";
            datFile.Items.ClearMarked();

            return success;
        }
Code Example #15
        public override bool ProcessFeatures(Dictionary<string, Help.Feature> features)
        {
            // If the base fails, just fail out
            if (!base.ProcessFeatures(features))
            {
                return false;
            }

            // Try to read each input as a batch run file
            foreach (string path in Inputs)
            {
                InternalStopwatch watch = new InternalStopwatch($"Processing '{path}'...");
                ProcessScript(path);
                watch.Stop();
            }

            return true;
        }
Code Example #16
        /// <summary>
        /// Split a SuperDAT by lowest available directory level
        /// </summary>
        /// <param name="datFile">Current DatFile object to split</param>
        /// <param name="outDir">Name of the directory to write the DATs out to</param>
        /// <param name="shortname">True if short names should be used, false otherwise</param>
        /// <param name="basedat">True if original filenames should be used as the base for output filename, false otherwise</param>
        /// <returns>True if split succeeded, false otherwise</returns>
        public static bool SplitByLevel(DatFile datFile, string outDir, bool shortname, bool basedat)
        {
            InternalStopwatch watch = new InternalStopwatch("Splitting DAT by level");

            // First, bucket by games so that we can do the right thing
            datFile.Items.BucketBy(ItemKey.Machine, DedupeType.None, lower: false, norename: true);

            // Create a temporary DAT to add things to
            DatFile tempDat = DatFile.Create(datFile.Header);

            tempDat.Header.Name = null;

            // Sort the input keys
            List<string> keys = datFile.Items.Keys.ToList();

            keys.Sort(SplitByLevelSort);

            // Then, we loop over the games
            Parallel.ForEach(keys, Globals.ParallelOptions, key =>
            {
                // Here, the key is the name of the game to be used for comparison
                if (tempDat.Header.Name != null && tempDat.Header.Name != Path.GetDirectoryName(key))
                {
                    // Reset the DAT for the next items
                    tempDat             = DatFile.Create(datFile.Header);
                    tempDat.Header.Name = null;
                }

                // Clean the input list and set all games to be pathless
                ConcurrentList <DatItem> items                 = datFile.Items[key];
                items.ForEach(item => item.Machine.Name        = Path.GetFileName(item.Machine.Name));
                items.ForEach(item => item.Machine.Description = Path.GetFileName(item.Machine.Description));

                // Now add the game to the output DAT
                tempDat.Items.AddRange(key, items);

                // Then set the DAT name to be the parent directory name
                tempDat.Header.Name = Path.GetDirectoryName(key);
            });

            watch.Stop();
            return true;
        }
Code Example #17
File: Remover.cs Project: SabreTools/SabreTools
        /// <summary>
        /// Populate the exclusion objects using a set of field names
        /// </summary>
        /// <param name="fields">List of field names</param>
        public void PopulateExclusionsFromList(List<string> fields)
        {
            // Instantiate the removers, if necessary
            DatHeaderRemover ??= new DatHeaderRemover();
            DatItemRemover ??= new DatItemRemover();

            // If the list is null or empty, just return
            if (fields == null || fields.Count == 0)
            {
                return;
            }

            InternalStopwatch watch = new InternalStopwatch("Populating removals from list");

            foreach (string field in fields)
            {
                // If we don't even have a possible field name
                if (field == null)
                {
                    continue;
                }

                // DatHeader fields
                if (DatHeaderRemover.SetRemover(field))
                {
                    continue;
                }

                // Machine and DatItem fields
                if (DatItemRemover.SetRemover(field))
                {
                    continue;
                }

                // If we didn't match anything, log an error
                logger.Warning($"The value {field} did not match any known field names. Please check the wiki for more details on supported field names.");
            }

            watch.Stop();
        }
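
Usage note: this method pairs with ApplyRemovals from code example #2, so a combined sketch looks like the following; it assumes a parameterless Remover constructor, and the field names are hypothetical.

        // Sketch: register two removals, then strip them from a parsed DAT
        var remover = new Remover();
        remover.PopulateExclusionsFromList(new List<string>
        {
            "header.author",   // hypothetical DatHeader field
            "datitem.date",    // hypothetical DatItem field
        });
        remover.ApplyRemovals(datFile);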
Code Example #18
File: ExtraIni.cs Project: SabreTools/SabreTools
        /// <summary>
        /// Populate item using field:file inputs
        /// </summary>
        /// <param name="inputs">Field and file combinations</param>
        public void PopulateFromList(List<string> inputs)
        {
            // If there are no inputs, just skip
            if (inputs == null || !inputs.Any())
            {
                return;
            }

            InternalStopwatch watch = new InternalStopwatch("Populating extras from list");

            foreach (string input in inputs)
            {
                ExtraIniItem item = new ExtraIniItem();

                // If we don't even have a possible field and file combination
                if (!input.Contains(":"))
                {
                    logger.Warning($"'{input}` is not a valid INI extras string. Valid INI extras strings are of the form 'key:value'. Please refer to README.1ST or the help feature for more details.");
                    return;
                }

                string inputTrimmed = input.Trim('"', ' ', '\t');
                string fieldString  = inputTrimmed.Split(':')[0].ToLowerInvariant().Trim('"', ' ', '\t');
                string fileString   = inputTrimmed[(fieldString.Length + 1)..].Trim('"', ' ', '\t');
Code Example #19
        /// <summary>
        /// Wrap verifying files using an input DAT
        /// </summary>
        /// <param name="datfiles">Names of the DATs to compare against</param>
        /// <param name="inputs">Input directories to compare against</param>
        /// <param name="depot">True if the input direcories are treated as romba depots, false otherwise</param>
        /// <param name="hashOnly">True if only hashes should be checked, false for full file information</param>
        /// <param name="quickScan">True to enable external scanning of archives, false otherwise</param>
        /// <param name="headerToCheckAgainst">Populated string representing the name of the skipper to use, a blank string to use the first available checker, null otherwise</param>
        /// <param name="splitType">Type of the split that should be performed (split, merged, fully merged)</param>
        /// <param name="chdsAsFiles">True if CHDs should be treated like regular files, false otherwise</param>
        /// <param name="individual">True if DATs should be verified individually, false if they should be done in bulk</param>
        private static void InitVerify(
            List<string> datfiles,
            List<string> inputs,
            bool depot,
            bool hashOnly,
            bool quickScan,
            string headerToCheckAgainst,
            SplitType splitType,
            bool chdsAsFiles,
            bool individual)
        {
            // Get the archive scanning level
            ArchiveScanLevel asl = Utilities.GetArchiveScanLevelFromNumbers(1, 1, 1, 1);

            // Get a list of files from the input datfiles
            datfiles = Utilities.GetOnlyFilesFromInputs(datfiles);

            // If we are in individual mode, process each DAT on their own
            if (individual)
            {
                foreach (string datfile in datfiles)
                {
                    DatFile datdata = new DatFile();
                    datdata.Parse(datfile, 99, 99, splitType, keep: true, useTags: true);

                    // If we have the depot flag, respect it
                    if (depot)
                    {
                        datdata.VerifyDepot(inputs, headerToCheckAgainst);
                    }
                    else
                    {
                        datdata.VerifyGeneric(inputs, hashOnly, quickScan, headerToCheckAgainst, chdsAsFiles);
                    }
                }
            }
            // Otherwise, process all DATs into the same output
            else
            {
                InternalStopwatch watch = new InternalStopwatch("Populating internal DAT");

                // Add all of the input DATs into one huge internal DAT
                DatFile datdata = new DatFile();
                foreach (string datfile in datfiles)
                {
                    datdata.Parse(datfile, 99, 99, splitType, keep: true, useTags: true);
                }

                watch.Stop();

                // If we have the depot flag, respect it
                if (depot)
                {
                    datdata.VerifyDepot(inputs, headerToCheckAgainst);
                }
                else
                {
                    datdata.VerifyGeneric(inputs, hashOnly, quickScan, headerToCheckAgainst, chdsAsFiles);
                }
            }
        }
Code Example #20
        /// <summary>
        /// Wrap sorting files using an input DAT
        /// </summary>
        /// <param name="datfiles">Names of the DATs to compare against</param>
        /// <param name="inputs">List of input files/folders to check</param>
        /// <param name="outDir">Output directory to use to build to</param>
        /// <param name="depot">True if the input direcories are treated as romba depots, false otherwise</param>
        /// <param name="quickScan">True to enable external scanning of archives, false otherwise</param>
        /// <param name="date">True if the date from the DAT should be used if available, false otherwise</param>
        /// <param name="delete">True if input files should be deleted, false otherwise</param>
        /// <param name="inverse">True if the DAT should be used as a filter instead of a template, false otherwise</param>
        /// <param name="outputFormat">Output format that files should be written to</param>
        /// <param name="romba">True if files should be output in Romba depot folders, false otherwise</param>
        /// <param name="sevenzip">Integer representing the archive handling level for 7z</param>
        /// <param name="gz">Integer representing the archive handling level for GZip</param>
        /// <param name="rar">Integer representing the archive handling level for RAR</param>
        /// <param name="zip">Integer representing the archive handling level for Zip</param>
        /// <param name="updateDat">True if the updated DAT should be output, false otherwise</param>
        /// <param name="headerToCheckAgainst">Populated string representing the name of the skipper to use, a blank string to use the first available checker, null otherwise</param>
        /// <param name="splitType">Type of the split that should be performed (split, merged, fully merged)</param>
        /// <param name="chdsAsFiles">True if CHDs should be treated like regular files, false otherwise</param>
        /// <param name="individual">True if DATs should be sorted individually, false if they should be done in bulk</param>
        private static void InitSort(
            List<string> datfiles,
            List<string> inputs,
            string outDir,
            bool depot,
            bool quickScan,
            bool date,
            bool delete,
            bool inverse,
            OutputFormat outputFormat,
            bool romba,
            int sevenzip,
            int gz,
            int rar,
            int zip,
            bool updateDat,
            string headerToCheckAgainst,
            SplitType splitType,
            bool chdsAsFiles,
            bool individual)
        {
            // Get the archive scanning level
            ArchiveScanLevel asl = Utilities.GetArchiveScanLevelFromNumbers(sevenzip, gz, rar, zip);

            // Get a list of files from the input datfiles
            datfiles = Utilities.GetOnlyFilesFromInputs(datfiles);

            // If we are in individual mode, process each DAT on their own, appending the DAT name to the output dir
            if (individual)
            {
                foreach (string datfile in datfiles)
                {
                    DatFile datdata = new DatFile();
                    datdata.Parse(datfile, 99, 99, splitType, keep: true, useTags: true);

                    // If we have the depot flag, respect it
                    if (depot)
                    {
                        datdata.RebuildDepot(inputs, Path.Combine(outDir, datdata.FileName), date, delete, inverse, outputFormat, romba,
                                             updateDat, headerToCheckAgainst);
                    }
                    else
                    {
                        datdata.RebuildGeneric(inputs, Path.Combine(outDir, datdata.FileName), quickScan, date, delete, inverse, outputFormat, romba, asl,
                                               updateDat, headerToCheckAgainst, chdsAsFiles);
                    }
                }
            }
            // Otherwise, process all DATs into the same output
            else
            {
                InternalStopwatch watch = new InternalStopwatch("Populating internal DAT");

                // Add all of the input DATs into one huge internal DAT
                DatFile datdata = new DatFile();
                foreach (string datfile in datfiles)
                {
                    datdata.Parse(datfile, 99, 99, splitType, keep: true, useTags: true);
                }

                watch.Stop();

                // If we have the depot flag, respect it
                if (depot)
                {
                    datdata.RebuildDepot(inputs, outDir, date, delete, inverse, outputFormat, romba,
                                         updateDat, headerToCheckAgainst);
                }
                else
                {
                    datdata.RebuildGeneric(inputs, outDir, quickScan, date, delete, inverse, outputFormat, romba, asl,
                                           updateDat, headerToCheckAgainst, chdsAsFiles);
                }
            }
        }
Code Example #21
File: Statistics.cs Project: SabreTools/SabreTools
        /// <summary>
        /// Calculate statistics from a list of inputs
        /// </summary>
        /// <param name="inputs">List of input files and folders</param>
        /// <param name="single">True if single DAT stats are output, false otherwise</param>
        /// <param name="throwOnError">True if the error that is thrown should be thrown back to the caller, false otherwise</param>
        public static List<DatStatistics> CalculateStatistics(List<string> inputs, bool single, bool throwOnError = false)
        {
            // Create the output list
            List<DatStatistics> stats = new List<DatStatistics>();

            // Make sure we have all files and then order them
            List<ParentablePath> files = PathTool.GetFilesOnly(inputs);

            files = files
                    .OrderBy(i => Path.GetDirectoryName(i.CurrentPath))
                    .ThenBy(i => Path.GetFileName(i.CurrentPath))
                    .ToList();

            // Init total
            DatStatistics totalStats = new DatStatistics
            {
                Statistics   = new ItemDictionary(),
                DisplayName  = "DIR: All DATs",
                MachineCount = 0,
                IsDirectory  = true,
            };

            // Init directory-level variables
            string        lastdir  = null;
            DatStatistics dirStats = new DatStatistics
            {
                Statistics   = new ItemDictionary(),
                MachineCount = 0,
                IsDirectory  = true,
            };

            // Now process each of the input files
            foreach (ParentablePath file in files)
            {
                // Get the directory for the current file
                string thisdir = Path.GetDirectoryName(file.CurrentPath);

                // If we don't have the first file and the directory has changed, show the previous directory stats and reset
                if (lastdir != null && thisdir != lastdir && single)
                {
                    dirStats.DisplayName  = $"DIR: {WebUtility.HtmlEncode(lastdir)}";
                    dirStats.MachineCount = dirStats.Statistics.GameCount;
                    stats.Add(dirStats);
                    dirStats = new DatStatistics
                    {
                        Statistics   = new ItemDictionary(),
                        MachineCount = 0,
                        IsDirectory  = true,
                    };
                }

                InternalStopwatch watch = new InternalStopwatch($"Collecting statistics for '{file.CurrentPath}'");

                List<string> machines = new List<string>();
                DatFile       datdata  = Parser.CreateAndParse(file.CurrentPath, statsOnly: true, throwOnError: throwOnError);
                datdata.Items.BucketBy(ItemKey.Machine, DedupeType.None, norename: true);

                // Add single DAT stats (if asked)
                if (single)
                {
                    DatStatistics individualStats = new DatStatistics
                    {
                        Statistics   = datdata.Items,
                        DisplayName  = datdata.Header.FileName,
                        MachineCount = datdata.Items.Keys.Count,
                        IsDirectory  = false,
                    };
                    stats.Add(individualStats);
                }

                // Add single DAT stats to dir
                dirStats.Statistics.AddStatistics(datdata.Items);
                dirStats.Statistics.GameCount += datdata.Items.Keys.Count();

                // Add single DAT stats to totals
                totalStats.Statistics.AddStatistics(datdata.Items);
                totalStats.Statistics.GameCount += datdata.Items.Keys.Count();

                // Make sure to assign the new directory
                lastdir = thisdir;

                watch.Stop();
            }

            // Add last directory stats
            if (single)
            {
                dirStats.DisplayName  = $"DIR: {WebUtility.HtmlEncode(lastdir)}";
                dirStats.MachineCount = dirStats.Statistics.GameCount;
                stats.Add(dirStats);
            }

            // Add total DAT stats
            totalStats.MachineCount = totalStats.Statistics.GameCount;
            stats.Add(totalStats);

            return stats;
        }
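
Usage note: a sketch chaining this method into the report writer from code example #6; the input folder and output directory are hypothetical.

        // Sketch: compute per-DAT plus rolled-up stats, then emit a text report
        List<DatStatistics> stats = CalculateStatistics(
            new List<string> { @"C:\dats" },
            single: true);

        bool ok = Write(
            stats,
            reportName: "report",
            outDir: @"C:\reports",
            baddumpCol: true,
            nodumpCol: true,
            statDatFormat: StatReportFormat.Textfile);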
Code Example #22
File: Sort.cs Project: elijah0067/SabreTools
        public override void ProcessFeatures(Dictionary<string, Feature> features)
        {
            base.ProcessFeatures(features);

            // Get feature flags
            TreatAsFile asFiles      = GetTreatAsFiles(features);
            bool        date         = GetBoolean(features, AddDateValue);
            bool        delete       = GetBoolean(features, DeleteValue);
            bool        inverse      = GetBoolean(features, InverseValue);
            bool        quickScan    = GetBoolean(features, QuickValue);
            bool        updateDat    = GetBoolean(features, UpdateDatValue);
            var         outputFormat = GetOutputFormat(features);

            // If we have the romba flag
            if (Header.OutputDepot?.IsActive == true)
            {
                // Update TorrentGzip output
                if (outputFormat == OutputFormat.TorrentGzip)
                {
                    outputFormat = OutputFormat.TorrentGzipRomba;
                }

                // Update TorrentXz output
                else if (outputFormat == OutputFormat.TorrentXZ)
                {
                    outputFormat = OutputFormat.TorrentXZRomba;
                }
            }

            // Get a list of files from the input datfiles
            var datfiles     = GetList(features, DatListValue);
            var datfilePaths = DirectoryExtensions.GetFilesOnly(datfiles);

            // If we are in individual mode, process each DAT on their own, appending the DAT name to the output dir
            if (GetBoolean(features, IndividualValue))
            {
                foreach (ParentablePath datfile in datfilePaths)
                {
                    DatFile datdata = DatFile.Create();
                    datdata.Parse(datfile, int.MaxValue, keep: true);

                    // Set depot information
                    datdata.Header.InputDepot  = Header.InputDepot.Clone() as DepotInformation;
                    datdata.Header.OutputDepot = Header.OutputDepot.Clone() as DepotInformation;

                    // If we have overridden the header skipper, set it now
                    if (!string.IsNullOrEmpty(Header.HeaderSkipper))
                    {
                        datdata.Header.HeaderSkipper = Header.HeaderSkipper;
                    }

                    // If we have the depot flag, respect it
                    bool success;
                    if (Header.InputDepot?.IsActive ?? false)
                    {
                        success = datdata.RebuildDepot(Inputs, Path.Combine(OutputDir, datdata.Header.FileName), date, delete, inverse, outputFormat);
                    }
                    else
                    {
                        success = datdata.RebuildGeneric(Inputs, Path.Combine(OutputDir, datdata.Header.FileName), quickScan, date, delete, inverse, outputFormat, asFiles);
                    }

                    // If we have a success and we're updating the DAT, write it out
                    if (success && updateDat)
                    {
                        datdata.Header.FileName    = $"fixDAT_{Header.FileName}";
                        datdata.Header.Name        = $"fixDAT_{Header.Name}";
                        datdata.Header.Description = $"fixDAT_{Header.Description}";
                        datdata.Items.ClearMarked();
                        datdata.Write(OutputDir);
                    }
                }
            }

            // Otherwise, process all DATs into the same output
            else
            {
                InternalStopwatch watch = new InternalStopwatch("Populating internal DAT");

                // Add all of the input DATs into one huge internal DAT
                DatFile datdata = DatFile.Create();
                foreach (ParentablePath datfile in datfilePaths)
                {
                    datdata.Parse(datfile, int.MaxValue, keep: true);
                }

                // Set depot information
                datdata.Header.InputDepot  = Header.InputDepot.Clone() as DepotInformation;
                datdata.Header.OutputDepot = Header.OutputDepot.Clone() as DepotInformation;

                // If we have overridden the header skipper, set it now
                if (!string.IsNullOrEmpty(Header.HeaderSkipper))
                {
                    datdata.Header.HeaderSkipper = Header.HeaderSkipper;
                }

                watch.Stop();

                // If we have the depot flag, respect it
                bool success;
                if (Header.InputDepot?.IsActive ?? false)
                {
                    success = datdata.RebuildDepot(Inputs, OutputDir, date, delete, inverse, outputFormat);
                }
                else
                {
                    success = datdata.RebuildGeneric(Inputs, OutputDir, quickScan, date, delete, inverse, outputFormat, asFiles);
                }

                // If we have a success and we're updating the DAT, write it out
                if (success && updateDat)
                {
                    datdata.Header.FileName    = $"fixDAT_{Header.FileName}";
                    datdata.Header.Name        = $"fixDAT_{Header.Name}";
                    datdata.Header.Description = $"fixDAT_{Header.Description}";
                    datdata.Items.ClearMarked();
                    datdata.Write(OutputDir);
                }
            }
        }
Code Example #23
File: Split.cs Project: SabreTools/SabreTools
        public override bool ProcessFeatures(Dictionary<string, Help.Feature> features)
        {
            // If the base fails, just fail out
            if (!base.ProcessFeatures(features))
            {
                return false;
            }

            // Get the splitting mode
            SplittingMode splittingMode = GetSplittingMode(features);

            if (splittingMode == SplittingMode.None)
            {
                logger.Error("No valid splitting mode found!");
                return false;
            }

            // Get only files from the inputs
            List<ParentablePath> files = PathTool.GetFilesOnly(Inputs, appendparent: true);

            // Loop over the input files
            foreach (ParentablePath file in files)
            {
                // Create and fill the new DAT
                DatFile internalDat = DatFile.Create(Header);
                Parser.ParseInto(internalDat, file);

                // Get the output directory
                OutputDir = OutputDir.Ensure();
                OutputDir = file.GetOutputPath(OutputDir, GetBoolean(features, InplaceValue));

                // Extension splitting
                if (splittingMode.HasFlag(SplittingMode.Extension))
                {
                    (DatFile extADat, DatFile extBDat) = DatTools.Splitter.SplitByExtension(internalDat, GetList(features, ExtAListValue), GetList(features, ExtBListValue));

                    InternalStopwatch watch = new InternalStopwatch("Outputting extension-split DATs");

                    // Output both possible DatFiles
                    Writer.Write(extADat, OutputDir);
                    Writer.Write(extBDat, OutputDir);

                    watch.Stop();
                }

                // Hash splitting
                if (splittingMode.HasFlag(SplittingMode.Hash))
                {
                    Dictionary <DatItemField, DatFile> typeDats = DatTools.Splitter.SplitByHash(internalDat);

                    InternalStopwatch watch = new InternalStopwatch("Outputting hash-split DATs");

                    // Loop through each type DatFile
                    Parallel.ForEach(typeDats.Keys, Globals.ParallelOptions, itemType =>
                    {
                        Writer.Write(typeDats[itemType], OutputDir);
                    });

                    watch.Stop();
                }

                // Level splitting
                if (splittingMode.HasFlag(SplittingMode.Level))
                {
                    logger.Warning("This feature is not implemented: level-split");
                    DatTools.Splitter.SplitByLevel(
                        internalDat,
                        OutputDir,
                        GetBoolean(features, ShortValue),
                        GetBoolean(features, BaseValue));
                }

                // Size splitting
                if (splittingMode.HasFlag(SplittingMode.Size))
                {
                    (DatFile lessThan, DatFile greaterThan) = DatTools.Splitter.SplitBySize(internalDat, GetInt64(features, RadixInt64Value));

                    InternalStopwatch watch = new InternalStopwatch("Outputting size-split DATs");

                    // Output both possible DatFiles
                    Writer.Write(lessThan, OutputDir);
                    Writer.Write(greaterThan, OutputDir);

                    watch.Stop();
                }

                // Total Size splitting
                if (splittingMode.HasFlag(SplittingMode.TotalSize))
                {
                    logger.Warning("This feature is not implemented: total-size-split");
                    List <DatFile> sizedDats = DatTools.Splitter.SplitByTotalSize(internalDat, GetInt64(features, ChunkSizeInt64Value));

                    InternalStopwatch watch = new InternalStopwatch("Outputting total-size-split DATs");

                    // Loop through each type DatFile
                    Parallel.ForEach(sizedDats, Globals.ParallelOptions, sizedDat =>
                    {
                        Writer.Write(sizedDat, OutputDir);
                    });

                    watch.Stop();
                }

                // Type splitting
                if (splittingMode.HasFlag(SplittingMode.Type))
                {
                    Dictionary <ItemType, DatFile> typeDats = DatTools.Splitter.SplitByType(internalDat);

                    InternalStopwatch watch = new InternalStopwatch("Outputting ItemType DATs");

                    // Loop through each type DatFile
                    Parallel.ForEach(typeDats.Keys, Globals.ParallelOptions, itemType =>
                    {
                        Writer.Write(typeDats[itemType], OutputDir);
                    });

                    watch.Stop();
                }
            }

            return(true);
        }
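
For reference, the splitters used above can also be driven outside of a command class. A minimal sketch using the DatTools.Splitter and Writer calls from this example; the input path, radix, and output directory are hypothetical, and building a ParentablePath directly from a string is an assumption.

    // Parse a hypothetical input DAT
    DatFile dat = DatFile.Create();
    Parser.ParseInto(dat, new ParentablePath("input.dat"));

    // Split at a hypothetical 1 MiB radix, then write both halves
    (DatFile lessThan, DatFile greaterThan) = DatTools.Splitter.SplitBySize(dat, 1024 * 1024);
    Writer.Write(lessThan, "out");
    Writer.Write(greaterThan, "out");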
Code Example #24
        public override void ProcessFeatures(Dictionary <string, Library.Help.Feature> features)
        {
            base.ProcessFeatures(features);

            // Get feature flags
            var updateFields = GetUpdateFields(features);
            var updateMode   = GetUpdateMode(features);

            // Normalize the extensions
            Header.AddExtension = (string.IsNullOrWhiteSpace(Header.AddExtension) || Header.AddExtension.StartsWith(".")
                ? Header.AddExtension
                : $".{Header.AddExtension}");
            Header.ReplaceExtension = (string.IsNullOrWhiteSpace(Header.ReplaceExtension) || Header.ReplaceExtension.StartsWith(".")
                ? Header.ReplaceExtension
                : $".{Header.ReplaceExtension}");

            // If we're in a special update mode and the names aren't set, set defaults
            if (updateMode != 0)
            {
                // Get the values that will be used
                if (string.IsNullOrWhiteSpace(Header.Date))
                {
                    Header.Date = DateTime.Now.ToString("yyyy-MM-dd");
                }

                if (string.IsNullOrWhiteSpace(Header.Name))
                {
                    Header.Name = (updateMode != 0 ? "DiffDAT" : "MergeDAT")
                                  + (Header.Type == "SuperDAT" ? "-SuperDAT" : string.Empty)
                                  + (Cleaner.DedupeRoms != DedupeType.None ? "-deduped" : string.Empty);
                }

                if (string.IsNullOrWhiteSpace(Header.Description))
                {
                    Header.Description = (updateMode != 0 ? "DiffDAT" : "MergeDAT")
                                         + (Header.Type == "SuperDAT" ? "-SuperDAT" : string.Empty)
                                         + (Cleaner.DedupeRoms != DedupeType.None ? " - deduped" : string.Empty);

                    if (!GetBoolean(features, NoAutomaticDateValue))
                    {
                        Header.Description += $" ({Header.Date})";
                    }
                }

                if (string.IsNullOrWhiteSpace(Header.Category) && updateMode != 0)
                {
                    Header.Category = "DiffDAT";
                }

                if (string.IsNullOrWhiteSpace(Header.Author))
                {
                    Header.Author = "SabreTools";
                }
            }

            // If no update fields are set, default to Names
            if (updateFields == null || updateFields.Count == 0)
            {
                updateFields = new List <Field>()
                {
                    Field.DatItem_Name
                };
            }

            // Ensure we only have files in the inputs
            List <ParentablePath> inputPaths = DirectoryExtensions.GetFilesOnly(Inputs, appendparent: true);
            List <ParentablePath> basePaths  = DirectoryExtensions.GetFilesOnly(GetList(features, BaseDatListValue));

            // If we're in standard update mode, run through all of the inputs
            if (updateMode == UpdateMode.None)
            {
                // Loop through each input and update
                Parallel.ForEach(inputPaths, Globals.ParallelOptions, inputPath =>
                {
                    // Create a new base DatFile
                    DatFile datFile = DatFile.Create(Header);
                    logger.User($"Processing '{Path.GetFileName(inputPath.CurrentPath)}'");
                    datFile.Parse(inputPath, keep: true,
                                  keepext: datFile.Header.DatFormat.HasFlag(DatFormat.TSV) ||
                                  datFile.Header.DatFormat.HasFlag(DatFormat.CSV) ||
                                  datFile.Header.DatFormat.HasFlag(DatFormat.SSV));

                    // Perform additional processing steps
                    datFile.ApplyExtras(Extras);
                    datFile.ApplySplitting(GetSplitType(features), false);
                    datFile.ApplyFilter(Filter);
                    datFile.ApplyCleaning(Cleaner);

                    // Get the correct output path
                    string realOutDir = inputPath.GetOutputPath(OutputDir, GetBoolean(features, InplaceValue));

                    // Try to output the file, overwriting only if it's not in the current directory
                    datFile.Write(realOutDir, overwrite: GetBoolean(features, InplaceValue));
                });

                return;
            }

            // Reverse inputs if we're in a required mode
            if (updateMode.HasFlag(UpdateMode.DiffReverseCascade))
            {
                updateMode |= UpdateMode.DiffCascade;
                inputPaths.Reverse();
            }
            if (updateMode.HasFlag(UpdateMode.ReverseBaseReplace))
            {
                updateMode |= UpdateMode.BaseReplace;
                basePaths.Reverse();
            }

            // Create a DAT to capture inputs
            DatFile userInputDat = DatFile.Create(Header);

            // Populate using the correct set
            List <DatHeader> datHeaders;

            if (updateMode.HasFlag(UpdateMode.DiffAgainst) || updateMode.HasFlag(UpdateMode.BaseReplace))
            {
                datHeaders = userInputDat.PopulateUserData(basePaths);
            }
            else
            {
                datHeaders = userInputDat.PopulateUserData(inputPaths);
            }

            // Perform additional processing steps
            userInputDat.ApplyExtras(Extras);
            userInputDat.ApplySplitting(GetSplitType(features), false);
            userInputDat.ApplyFilter(Filter);
            userInputDat.ApplyCleaning(Cleaner);

            // Output only DatItems that are duplicated across inputs
            if (updateMode.HasFlag(UpdateMode.DiffDupesOnly))
            {
                DatFile dupeData = userInputDat.DiffDuplicates(inputPaths);

                InternalStopwatch watch = new InternalStopwatch("Outputting duplicate DAT");
                dupeData.Write(OutputDir, overwrite: false);
                watch.Stop();
            }

            // Output only DatItems that are not duplicated across inputs
            if (updateMode.HasFlag(UpdateMode.DiffNoDupesOnly))
            {
                DatFile outerDiffData = userInputDat.DiffNoDuplicates(inputPaths);

                InternalStopwatch watch = new InternalStopwatch("Outputting no duplicate DAT");
                outerDiffData.Write(OutputDir, overwrite: false);
                watch.Stop();
            }

            // Output only DatItems that are unique to each input
            if (updateMode.HasFlag(UpdateMode.DiffIndividualsOnly))
            {
                // Get all of the output DatFiles
                List <DatFile> datFiles = userInputDat.DiffIndividuals(inputPaths);

                // Loop through and output the new DatFiles
                InternalStopwatch watch = new InternalStopwatch("Outputting all individual DATs");

                Parallel.For(0, inputPaths.Count, Globals.ParallelOptions, j =>
                {
                    string path = inputPaths[j].GetOutputPath(OutputDir, GetBoolean(features, InplaceValue));

                    // Try to output the file
                    datFiles[j].Write(path, overwrite: GetBoolean(features, InplaceValue));
                });

                watch.Stop();
            }

            // Output cascaded diffs
            if (updateMode.HasFlag(UpdateMode.DiffCascade))
            {
                // Preprocess the DatHeaders
                Parallel.For(0, datHeaders.Count, Globals.ParallelOptions, j =>
                {
                    // If we're outputting to the runtime folder, rename
                    if (!GetBoolean(features, InplaceValue) && OutputDir == Environment.CurrentDirectory)
                    {
                        string innerpost = $" ({j} - {inputPaths[j].GetNormalizedFileName(true)} Only)";

                        datHeaders[j]              = userInputDat.Header;
                        datHeaders[j].FileName    += innerpost;
                        datHeaders[j].Name        += innerpost;
                        datHeaders[j].Description += innerpost;
                    }
                });

                // Get all of the output DatFiles
                List <DatFile> datFiles = userInputDat.DiffCascade(datHeaders);

                // Loop through and output the new DatFiles
                InternalStopwatch watch = new InternalStopwatch("Outputting all created DATs");

                int startIndex = GetBoolean(features, SkipFirstOutputValue) ? 1 : 0;
                Parallel.For(startIndex, inputPaths.Count, Globals.ParallelOptions, j =>
                {
                    string path = inputPaths[j].GetOutputPath(OutputDir, GetBoolean(features, InplaceValue));

                    // Try to output the file
                    datFiles[j].Write(path, overwrite: GetBoolean(features, InplaceValue));
                });

                watch.Stop();
            }

            // Output differences against a base DAT
            if (updateMode.HasFlag(UpdateMode.DiffAgainst))
            {
                // Loop through each input and diff against the base
                Parallel.ForEach(inputPaths, Globals.ParallelOptions, inputPath =>
                {
                    // Parse the path to a new DatFile
                    DatFile repDat = DatFile.Create(userInputDat.Header.CloneFiltering());
                    repDat.Parse(inputPath, indexId: 1, keep: true);

                    // Perform additional processing steps
                    repDat.ApplyExtras(Extras);
                    repDat.ApplySplitting(GetSplitType(features), false);
                    repDat.ApplyFilter(Filter);
                    repDat.ApplyCleaning(Cleaner);

                    // Now diff the input DatFile against the base
                    userInputDat.DiffAgainst(repDat, GetBoolean(features, ByGameValue));

                    // Finally output the diffed DatFile
                    string interOutDir = inputPath.GetOutputPath(OutputDir, GetBoolean(features, InplaceValue));
                    repDat.Write(interOutDir, overwrite: GetBoolean(features, InplaceValue));
                });
            }

            // Output DATs after replacing fields from a base DatFile
            if (updateMode.HasFlag(UpdateMode.BaseReplace))
            {
                // Loop through each input and apply the base DatFile
                Parallel.ForEach(inputPaths, Globals.ParallelOptions, inputPath =>
                {
                    // Parse the path to a new DatFile
                    DatFile repDat = DatFile.Create(userInputDat.Header.CloneFiltering());
                    repDat.Parse(inputPath, indexId: 1, keep: true);

                    // Perform additional processing steps
                    repDat.ApplyExtras(Extras);
                    repDat.ApplySplitting(GetSplitType(features), false);
                    repDat.ApplyFilter(Filter);
                    repDat.ApplyCleaning(Cleaner);

                    // Now replace the fields from the base DatFile
                    userInputDat.BaseReplace(repDat, updateFields, GetBoolean(features, OnlySameValue));

                    // Finally output the replaced DatFile
                    string interOutDir = inputPath.GetOutputPath(OutputDir, GetBoolean(features, InplaceValue));
                    repDat.Write(interOutDir, overwrite: GetBoolean(features, InplaceValue));
                });
            }

            // Merge all input files and write
            // This has to be last due to the SuperDAT handling
            if (updateMode.HasFlag(UpdateMode.Merge))
            {
                // If we're in SuperDAT mode, prefix all games with their respective DATs
                if (string.Equals(userInputDat.Header.Type, "SuperDAT", StringComparison.OrdinalIgnoreCase))
                {
                    userInputDat.ApplySuperDAT(inputPaths);
                }

                userInputDat.Write(OutputDir);
            }
        }
    }
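
The extension normalization near the top of this example (prefix a dot only when the value is set and the dot is missing) is self-contained enough to isolate. A minimal sketch; NormalizeExtension is a hypothetical helper name.

    private static string NormalizeExtension(string ext)
    {
        // Leave null/blank values and already-dotted extensions untouched
        if (string.IsNullOrWhiteSpace(ext) || ext.StartsWith("."))
            return ext;

        // Otherwise prefix the dot
        return $".{ext}";
    }

    // Usage, mirroring the code above:
    // Header.AddExtension     = NormalizeExtension(Header.AddExtension);
    // Header.ReplaceExtension = NormalizeExtension(Header.ReplaceExtension);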
Code Example #25
        /// <summary>
        /// Split a DAT into chunks by total Rom size
        /// </summary>
        /// <param name="datFile">Current DatFile object to split</param>
        /// <param name="chunkSize">Long value representing the total size to split at</param>
        /// <returns>List of DatFiles split by total size</returns>
        public static List <DatFile> SplitByTotalSize(DatFile datFile, long chunkSize)
        {
            // If the size is invalid, just return
            if (chunkSize <= 0)
            {
                return(new List <DatFile>());
            }

            // Create each of the respective output DATs
            InternalStopwatch watch = new InternalStopwatch("Splitting DAT by total size");

            // Sort the DatFile by machine name
            datFile.Items.BucketBy(ItemKey.Machine, DedupeType.None);

            // Get the keys in a known order for easier sorting
            var keys = datFile.Items.SortedKeys;

            // Get the output list
            List <DatFile> datFiles = new List <DatFile>();

            // Initialize everything
            long    currentSize  = 0;
            long    currentIndex = 0;
            DatFile currentDat   = DatFile.Create(datFile.Header.CloneStandard());

            currentDat.Header.FileName    += $"_{currentIndex}";
            currentDat.Header.Name        += $"_{currentIndex}";
            currentDat.Header.Description += $"_{currentIndex}";

            // Loop through each machine
            foreach (string machine in keys)
            {
                // Get the current machine
                var items = datFile.Items[machine];
                if (items == null || !items.Any())
                {
                    logger.Error($"{machine} contains no items and will be skipped");
                    continue;
                }

                // Get the total size of the current machine
                long machineSize = 0;
                foreach (var item in items)
                {
                    if (item is Rom rom)
                    {
                        // TODO: Should there be more than just a log if a single item is larger than the chunksize?
                        machineSize += rom.Size ?? 0;
                        if ((rom.Size ?? 0) > chunkSize)
                        {
                            logger.Error($"{rom.GetName() ?? string.Empty} in {machine} is larger than {chunkSize}");
                        }
                    }
                }

                // If the current machine size is greater than the chunk size by itself, we want to log and skip
                // TODO: Should this eventually try to split the machine here?
                if (machineSize > chunkSize)
                {
                    logger.Error($"{machine} is larger than {chunkSize} and will be skipped");
                    continue;
                }

                // If the current machine size makes the current DatFile too big, split
                else if (currentSize + machineSize > chunkSize)
                {
                    datFiles.Add(currentDat);
                    currentSize = 0;
                    currentIndex++;
                    currentDat = DatFile.Create(datFile.Header.CloneStandard());
                    currentDat.Header.FileName    += $"_{currentIndex}";
                    currentDat.Header.Name        += $"_{currentIndex}";
                    currentDat.Header.Description += $"_{currentIndex}";
                }

                // Add the current machine to the current DatFile
                currentDat.Items[machine] = items;
                currentSize += machineSize;
            }

            // Add the final DatFile to the list
            datFiles.Add(currentDat);

            // Then return the list
            watch.Stop();
            return(datFiles);
        }
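
A minimal sketch of calling SplitByTotalSize and writing each chunk, reusing the Writer call seen in the other examples on this page; the chunk size and paths are hypothetical, and building a ParentablePath directly from a string is an assumption.

    // Parse a hypothetical input DAT
    DatFile dat = DatFile.Create();
    Parser.ParseInto(dat, new ParentablePath("input.dat"));

    // Split into chunks of at most ~4 GiB of total Rom size, then write each one
    List<DatFile> chunks = SplitByTotalSize(dat, 4L * 1024 * 1024 * 1024);
    foreach (DatFile chunk in chunks)
    {
        Writer.Write(chunk, "out");
    }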
Code Example #26
File: Verification.cs Project: SabreTools/SabreTools
        /// <summary>
        /// Verify a DatFile against a set of depots, leaving only missing files
        /// </summary>
        /// <param name="datFile">Current DatFile object to verify against</param>
        /// <param name="inputs">List of input directories to compare against</param>
        /// <returns>True if verification was a success, false otherwise</returns>
        public static bool VerifyDepot(DatFile datFile, List <string> inputs)
        {
            bool success = true;

            InternalStopwatch watch = new InternalStopwatch("Verifying all from supplied depots");

            // Now loop through and get only directories from the input paths
            List <string> directories = new List <string>();

            foreach (string input in inputs)
            {
                // Add to the list if the input is a directory
                if (Directory.Exists(input))
                {
                    logger.Verbose($"Adding depot: {input}");
                    directories.Add(input);
                }
            }

            // If we don't have any directories, we want to exit
            if (directories.Count == 0)
            {
                return(success);
            }

            // Now that we have a list of depots, we want to bucket the input DAT by SHA-1
            datFile.Items.BucketBy(ItemKey.SHA1, DedupeType.None);

            // Then we want to loop through each of the hashes and see if we can rebuild
            var keys = datFile.Items.SortedKeys.ToList();

            foreach (string hash in keys)
            {
                // Pre-empt any issues that could arise from string length
                if (hash.Length != Constants.SHA1Length)
                {
                    continue;
                }

                logger.User($"Checking hash '{hash}'");

                // Get the extension path for the hash
                string subpath = Utilities.GetDepotPath(hash, datFile.Header.InputDepot.Depth);

                // Find the first depot that includes the hash
                string foundpath = null;
                foreach (string directory in directories)
                {
                    if (File.Exists(Path.Combine(directory, subpath)))
                    {
                        foundpath = Path.Combine(directory, subpath);
                        break;
                    }
                }

                // If we didn't find a path, then we continue
                if (foundpath == null)
                {
                    continue;
                }

                // If we have a path, we want to try to get the rom information
                GZipArchive tgz      = new GZipArchive(foundpath);
                BaseFile    fileinfo = tgz.GetTorrentGZFileInfo();

                // If the file information is null, then we continue
                if (fileinfo == null)
                {
                    continue;
                }

                // Now we want to remove all duplicates from the DAT
                datFile.Items.GetDuplicates(new Rom(fileinfo))
                    .AddRange(datFile.Items.GetDuplicates(new Disk(fileinfo)));
            }

            watch.Stop();

            // Set fixdat headers in case of writing out
            datFile.Header.FileName    = $"fixDAT_{datFile.Header.FileName}";
            datFile.Header.Name        = $"fixDAT_{datFile.Header.Name}";
            datFile.Header.Description = $"fixDAT_{datFile.Header.Description}";
            datFile.Items.ClearMarked();

            return(success);
        }
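
The per-hash lookup in this example reduces to: derive the depot subpath from the SHA-1, then probe each depot root in order. A minimal sketch of that step, reusing the Utilities.GetDepotPath call from above; FindInDepots is a hypothetical name.

    private static string FindInDepots(string sha1, int depth, List<string> depotRoots)
    {
        // Derive the nested subpath for this hash (depth controls the folder nesting)
        string subpath = Utilities.GetDepotPath(sha1, depth);

        // Probe each depot root in order; the first existing file wins
        foreach (string root in depotRoots)
        {
            string candidate = Path.Combine(root, subpath);
            if (File.Exists(candidate))
                return candidate;
        }

        // Not present in any depot
        return null;
    }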
Code Example #27
        /// <summary>
        /// Split a DAT by best available hashes
        /// </summary>
        /// <param name="datFile">Current DatFile object to split</param>
        /// <returns>Dictionary of Field to DatFile mappings</returns>
        public static Dictionary <DatItemField, DatFile> SplitByHash(DatFile datFile)
        {
            // Create each of the respective output DATs
            InternalStopwatch watch = new InternalStopwatch("Splitting DAT by best available hashes");

            // Create the set of field-to-dat mappings
            Dictionary <DatItemField, DatFile> fieldDats = new Dictionary <DatItemField, DatFile>();

            // TODO: Can this be made into a loop?
            fieldDats[DatItemField.Status] = DatFile.Create(datFile.Header.CloneStandard());
            fieldDats[DatItemField.Status].Header.FileName    += " (Nodump)";
            fieldDats[DatItemField.Status].Header.Name        += " (Nodump)";
            fieldDats[DatItemField.Status].Header.Description += " (Nodump)";

            fieldDats[DatItemField.SHA512] = DatFile.Create(datFile.Header.CloneStandard());
            fieldDats[DatItemField.SHA512].Header.FileName    += " (SHA-512)";
            fieldDats[DatItemField.SHA512].Header.Name        += " (SHA-512)";
            fieldDats[DatItemField.SHA512].Header.Description += " (SHA-512)";

            fieldDats[DatItemField.SHA384] = DatFile.Create(datFile.Header.CloneStandard());
            fieldDats[DatItemField.SHA384].Header.FileName    += " (SHA-384)";
            fieldDats[DatItemField.SHA384].Header.Name        += " (SHA-384)";
            fieldDats[DatItemField.SHA384].Header.Description += " (SHA-384)";

            fieldDats[DatItemField.SHA256] = DatFile.Create(datFile.Header.CloneStandard());
            fieldDats[DatItemField.SHA256].Header.FileName    += " (SHA-256)";
            fieldDats[DatItemField.SHA256].Header.Name        += " (SHA-256)";
            fieldDats[DatItemField.SHA256].Header.Description += " (SHA-256)";

            fieldDats[DatItemField.SHA1] = DatFile.Create(datFile.Header.CloneStandard());
            fieldDats[DatItemField.SHA1].Header.FileName    += " (SHA-1)";
            fieldDats[DatItemField.SHA1].Header.Name        += " (SHA-1)";
            fieldDats[DatItemField.SHA1].Header.Description += " (SHA-1)";

            fieldDats[DatItemField.MD5] = DatFile.Create(datFile.Header.CloneStandard());
            fieldDats[DatItemField.MD5].Header.FileName    += " (MD5)";
            fieldDats[DatItemField.MD5].Header.Name        += " (MD5)";
            fieldDats[DatItemField.MD5].Header.Description += " (MD5)";

            fieldDats[DatItemField.CRC] = DatFile.Create(datFile.Header.CloneStandard());
            fieldDats[DatItemField.CRC].Header.FileName    += " (CRC)";
            fieldDats[DatItemField.CRC].Header.Name        += " (CRC)";
            fieldDats[DatItemField.CRC].Header.Description += " (CRC)";

            fieldDats[DatItemField.NULL] = DatFile.Create(datFile.Header.CloneStandard());
            fieldDats[DatItemField.NULL].Header.FileName    += " (Other)";
            fieldDats[DatItemField.NULL].Header.Name        += " (Other)";
            fieldDats[DatItemField.NULL].Header.Description += " (Other)";

            // Now populate each of the DAT objects in turn
            Parallel.ForEach(datFile.Items.Keys, Globals.ParallelOptions, key =>
            {
                ConcurrentList <DatItem> items = datFile.Items[key];
                foreach (DatItem item in items)
                {
                    // If the file is not a Disk, Media, or Rom, continue
                    if (item.ItemType != ItemType.Disk && item.ItemType != ItemType.Media && item.ItemType != ItemType.Rom)
                    {
                        continue;
                    }

                    // If the file is a nodump
                    if ((item.ItemType == ItemType.Rom && (item as Rom).ItemStatus == ItemStatus.Nodump) ||
                        (item.ItemType == ItemType.Disk && (item as Disk).ItemStatus == ItemStatus.Nodump))
                    {
                        fieldDats[DatItemField.Status].Items.Add(key, item);
                    }

                    // If the file has a SHA-512
                    else if ((item.ItemType == ItemType.Rom && !string.IsNullOrWhiteSpace((item as Rom).SHA512)))
                    {
                        fieldDats[DatItemField.SHA512].Items.Add(key, item);
                    }

                    // If the file has a SHA-384
                    else if ((item.ItemType == ItemType.Rom && !string.IsNullOrWhiteSpace((item as Rom).SHA384)))
                    {
                        fieldDats[DatItemField.SHA384].Items.Add(key, item);
                    }

                    // If the file has a SHA-256
                    else if ((item.ItemType == ItemType.Media && !string.IsNullOrWhiteSpace((item as Media).SHA256)) ||
                             (item.ItemType == ItemType.Rom && !string.IsNullOrWhiteSpace((item as Rom).SHA256)))
                    {
                        fieldDats[DatItemField.SHA256].Items.Add(key, item);
                    }

                    // If the file has a SHA-1
                    else if ((item.ItemType == ItemType.Disk && !string.IsNullOrWhiteSpace((item as Disk).SHA1)) ||
                             (item.ItemType == ItemType.Media && !string.IsNullOrWhiteSpace((item as Media).SHA1)) ||
                             (item.ItemType == ItemType.Rom && !string.IsNullOrWhiteSpace((item as Rom).SHA1)))
                    {
                        fieldDats[DatItemField.SHA1].Items.Add(key, item);
                    }

                    // If the file has an MD5
                    else if ((item.ItemType == ItemType.Disk && !string.IsNullOrWhiteSpace((item as Disk).MD5)) ||
                             (item.ItemType == ItemType.Media && !string.IsNullOrWhiteSpace((item as Media).MD5)) ||
                             (item.ItemType == ItemType.Rom && !string.IsNullOrWhiteSpace((item as Rom).MD5)))
                    {
                        fieldDats[DatItemField.MD5].Items.Add(key, item);
                    }

                    // If the file has a CRC
                    else if ((item.ItemType == ItemType.Rom && !string.IsNullOrWhiteSpace((item as Rom).CRC)))
                    {
                        fieldDats[DatItemField.CRC].Items.Add(key, item);
                    }

                    else
                    {
                        fieldDats[DatItemField.NULL].Items.Add(key, item);
                    }
                }
            });

            watch.Stop();
            return(fieldDats);
        }
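
The TODO in this example asks whether the eight near-identical header blocks can be collapsed into a loop. A minimal sketch of that refactor, using the same DatItemField values and suffix strings as the code above.

    // Map each split field to its header suffix (values taken from the code above)
    var suffixes = new Dictionary<DatItemField, string>
    {
        [DatItemField.Status] = " (Nodump)",
        [DatItemField.SHA512] = " (SHA-512)",
        [DatItemField.SHA384] = " (SHA-384)",
        [DatItemField.SHA256] = " (SHA-256)",
        [DatItemField.SHA1]   = " (SHA-1)",
        [DatItemField.MD5]    = " (MD5)",
        [DatItemField.CRC]    = " (CRC)",
        [DatItemField.NULL]   = " (Other)",
    };

    // Build all of the output DatFiles in one pass
    var fieldDats = new Dictionary<DatItemField, DatFile>();
    foreach (var kvp in suffixes)
    {
        DatFile split = DatFile.Create(datFile.Header.CloneStandard());
        split.Header.FileName    += kvp.Value;
        split.Header.Name        += kvp.Value;
        split.Header.Description += kvp.Value;
        fieldDats[kvp.Key] = split;
    }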
Code Example #28
File: Verify.cs Project: elijah0067/SabreTools
        public override void ProcessFeatures(Dictionary <string, Feature> features)
        {
            base.ProcessFeatures(features);

            // Get a list of files from the input datfiles
            var datfiles     = GetList(features, DatListValue);
            var datfilePaths = DirectoryExtensions.GetFilesOnly(datfiles);

            // Get feature flags
            TreatAsFile asFiles   = GetTreatAsFiles(features);
            bool        hashOnly  = GetBoolean(features, HashOnlyValue);
            bool        quickScan = GetBoolean(features, QuickValue);
            var         splitType = GetSplitType(features);

            // If we are in individual mode, process each DAT on their own
            if (GetBoolean(features, IndividualValue))
            {
                foreach (ParentablePath datfile in datfilePaths)
                {
                    // Parse in from the file
                    DatFile datdata = DatFile.Create();
                    datdata.Parse(datfile, int.MaxValue, keep: true);

                    // Perform additional processing steps
                    datdata.ApplyExtras(Extras);
                    datdata.ApplySplitting(splitType, true);
                    datdata.ApplyFilter(Filter);
                    datdata.ApplyCleaning(Cleaner);

                    // Set depot information
                    datdata.Header.InputDepot = Header.InputDepot.Clone() as DepotInformation;

                    // If we have overridden the header skipper, set it now
                    if (!string.IsNullOrEmpty(Header.HeaderSkipper))
                    {
                        datdata.Header.HeaderSkipper = Header.HeaderSkipper;
                    }

                    // If we have the depot flag, respect it
                    if (Header.InputDepot?.IsActive ?? false)
                    {
                        datdata.VerifyDepot(Inputs);
                    }
                    else
                    {
                        // Loop through and add the inputs to check against
                        logger.User("Processing files:\n");
                        foreach (string input in Inputs)
                        {
                            datdata.PopulateFromDir(input, asFiles: asFiles, hashes: quickScan ? Hash.CRC : Hash.Standard);
                        }

                        datdata.VerifyGeneric(hashOnly);
                    }

                    // Now write out if there are any items left
                    datdata.WriteStatsToConsole();
                    datdata.Write(OutputDir);
                }
            }
            // Otherwise, process all DATs into the same output
            else
            {
                InternalStopwatch watch = new InternalStopwatch("Populating internal DAT");

                // Add all of the input DATs into one huge internal DAT
                DatFile datdata = DatFile.Create();
                foreach (ParentablePath datfile in datfilePaths)
                {
                    datdata.Parse(datfile, int.MaxValue, keep: true);
                }

                // Perform additional processing steps
                datdata.ApplyExtras(Extras);
                datdata.ApplySplitting(splitType, true);
                datdata.ApplyFilter(Filter);
                datdata.ApplyCleaning(Cleaner);

                // Set depot information
                datdata.Header.InputDepot = Header.InputDepot.Clone() as DepotInformation;

                // If we have overridden the header skipper, set it now
                if (!string.IsNullOrEmpty(Header.HeaderSkipper))
                {
                    datdata.Header.HeaderSkipper = Header.HeaderSkipper;
                }

                watch.Stop();

                // If we have the depot flag, respect it
                if (Header.InputDepot?.IsActive ?? false)
                {
                    datdata.VerifyDepot(Inputs);
                }
                else
                {
                    // Loop through and add the inputs to check against
                    logger.User("Processing files:\n");
                    foreach (string input in Inputs)
                    {
                        datdata.PopulateFromDir(input, asFiles: asFiles, hashes: quickScan ? Hash.CRC : Hash.Standard);
                    }

                    datdata.VerifyGeneric(hashOnly);
                }

                // Now write out if there are any items left
                datdata.WriteStatsToConsole();
                datdata.Write(OutputDir);
            }
        }
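
A minimal standalone sketch of the non-depot verification path shared by both branches above, with the calls taken from this example; the paths are hypothetical, hashOnly is passed as false, and building a ParentablePath directly from a string is an assumption.

    // Parse a hypothetical input DAT
    DatFile datdata = DatFile.Create();
    datdata.Parse(new ParentablePath("input.dat"), int.MaxValue, keep: true);

    // Scan a hypothetical rom directory; Hash.Standard computes the full hash set,
    // while a quick scan would use Hash.CRC only
    datdata.PopulateFromDir("roms", asFiles: TreatAsFile.NonArchive, hashes: Hash.Standard);

    // Check the scanned files against the DAT, then report and write what is missing
    datdata.VerifyGeneric(false);
    datdata.WriteStatsToConsole();
    datdata.Write("out");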
Code Example #29
File: Split.cs Project: elijah0067/SabreTools
        public override void ProcessFeatures(Dictionary <string, Library.Help.Feature> features)
        {
            base.ProcessFeatures(features);
            SplittingMode splittingMode = GetSplittingMode(features);

            // If we somehow have the "none" split type, return
            if (splittingMode == SplittingMode.None)
            {
                return;
            }

            // Get only files from the inputs
            List <ParentablePath> files = DirectoryExtensions.GetFilesOnly(Inputs, appendparent: true);

            // Loop over the input files
            foreach (ParentablePath file in files)
            {
                // Create and fill the new DAT
                DatFile internalDat = DatFile.Create(Header);
                internalDat.Parse(file);

                // Get the output directory
                OutputDir = file.GetOutputPath(OutputDir, GetBoolean(features, InplaceValue));

                // Extension splitting
                if (splittingMode.HasFlag(SplittingMode.Extension))
                {
                    (DatFile extADat, DatFile extBDat) = internalDat.SplitByExtension(GetList(features, ExtAListValue), GetList(features, ExtBListValue));

                    InternalStopwatch watch = new InternalStopwatch("Outputting extension-split DATs");

                    // Output both possible DatFiles
                    extADat.Write(OutputDir);
                    extBDat.Write(OutputDir);

                    watch.Stop();
                }

                // Hash splitting
                if (splittingMode.HasFlag(SplittingMode.Hash))
                {
                    Dictionary <Field, DatFile> typeDats = internalDat.SplitByHash();

                    InternalStopwatch watch = new InternalStopwatch("Outputting hash-split DATs");

                    // Loop through each type DatFile
                    Parallel.ForEach(typeDats.Keys, Globals.ParallelOptions, itemType =>
                    {
                        typeDats[itemType].Write(OutputDir);
                    });

                    watch.Stop();
                }

                // Level splitting
                if (splittingMode.HasFlag(SplittingMode.Level))
                {
                    logger.Warning("This feature is not implemented: level-split");
                    internalDat.SplitByLevel(
                        OutputDir,
                        GetBoolean(features, ShortValue),
                        GetBoolean(features, BaseValue));
                }

                // Size splitting
                if (splittingMode.HasFlag(SplittingMode.Size))
                {
                    (DatFile lessThan, DatFile greaterThan) = internalDat.SplitBySize(GetInt64(features, RadixInt64Value));

                    InternalStopwatch watch = new InternalStopwatch("Outputting size-split DATs");

                    // Output both possible DatFiles
                    lessThan.Write(OutputDir);
                    greaterThan.Write(OutputDir);

                    watch.Stop();
                }

                // Type splitting
                if (splittingMode.HasFlag(SplittingMode.Type))
                {
                    Dictionary <ItemType, DatFile> typeDats = internalDat.SplitByType();

                    InternalStopwatch watch = new InternalStopwatch("Outputting ItemType DATs");

                    // Loop through each type DatFile
                    Parallel.ForEach(typeDats.Keys, Globals.ParallelOptions, itemType =>
                    {
                        typeDats[itemType].Write(OutputDir);
                    });

                    watch.Stop();
                }
            }
        }
Code Example #30
        public override bool ProcessFeatures(Dictionary <string, SabreTools.Help.Feature> features)
        {
            // If the base fails, just fail out
            if (!base.ProcessFeatures(features))
            {
                return(false);
            }

            // Get feature flags
            int    workers      = GetInt32(features, WorkersInt32Value);
            string missingSha1s = GetString(features, MissingSha1sStringValue);

            // Make sure the db is set
            if (string.IsNullOrWhiteSpace(_db))
            {
                _db = "db.sqlite";
                _connectionString = $"Data Source={_db};Version = 3;";
            }

            // Make sure the file exists
            if (!File.Exists(_db))
            {
                EnsureDatabase(_db, _connectionString);
            }

            // Make sure the dats dir is set
            if (string.IsNullOrWhiteSpace(_dats))
            {
                _dats = "dats";
            }

            _dats = Path.Combine(PathTool.GetRuntimeDirectory(), _dats);

            // Make sure the folder exists
            if (!Directory.Exists(_dats))
            {
                Directory.CreateDirectory(_dats);
            }

            // First get a list of SHA-1's from the input DATs
            DatFile datroot = DatFile.Create();

            datroot.Header.Type = "SuperDAT";
            DatFromDir.PopulateFromDir(datroot, _dats, asFiles: TreatAsFile.NonArchive, hashes: Hash.Standard);
            datroot.Items.BucketBy(ItemKey.SHA1, DedupeType.None);

            // Create a List of dat hashes in the database (SHA-1)
            List <string> databaseDats = new List <string>();
            List <string> unneeded     = new List <string>();

            SqliteConnection dbc = new SqliteConnection(_connectionString);

            dbc.Open();

            // Populate the List from the database
            InternalStopwatch watch = new InternalStopwatch("Populating the list of existing DATs");

            string           query = "SELECT DISTINCT hash FROM dat";
            SqliteCommand    slc   = new SqliteCommand(query, dbc);
            SqliteDataReader sldr  = slc.ExecuteReader();

            if (sldr.HasRows)
            {
                while (sldr.Read())
                {
                    string hash = sldr.GetString(0);
                    if (datroot.Items.ContainsKey(hash))
                    {
                        datroot.Items.Remove(hash);
                        databaseDats.Add(hash);
                    }
                    else if (!databaseDats.Contains(hash))
                    {
                        unneeded.Add(hash);
                    }
                }
            }

            datroot.Items.BucketBy(ItemKey.Machine, DedupeType.None, norename: true);

            watch.Stop();

            slc.Dispose();
            sldr.Dispose();

            // Loop through the Dictionary and add all data
            watch.Start("Adding new DAT information");
            foreach (string key in datroot.Items.Keys)
            {
                foreach (Rom value in datroot.Items[key])
                {
                    AddDatToDatabase(value, dbc);
                }
            }

            watch.Stop();

            // Now loop through and remove all references to old Dats
            if (unneeded.Count > 0)
            {
                watch.Start("Removing unmatched DAT information");

                query = "DELETE FROM dat WHERE";
                foreach (string dathash in unneeded)
                {
                    query += $" OR hash=\"{dathash}\"";
                }

                query = query.Replace("WHERE OR", "WHERE");
                slc   = new SqliteCommand(query, dbc);
                slc.ExecuteNonQuery();
                slc.Dispose();

                watch.Stop();
            }

            dbc.Dispose();
            return(true);
        }
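
One caveat on the cleanup step above: the DELETE statement is built by concatenating raw hash strings into the query. A safer equivalent binds each hash as a parameter; a minimal sketch against the same SqliteConnection/SqliteCommand API used in this example, assuming (as the original code guards) that unneeded is non-empty.

    // Delete all unneeded hashes with a parameterized IN (...) clause
    using (SqliteCommand cmd = dbc.CreateCommand())
    {
        var names = new List<string>();
        for (int i = 0; i < unneeded.Count; i++)
        {
            string name = $"@hash{i}";
            names.Add(name);
            cmd.Parameters.AddWithValue(name, unneeded[i]);
        }

        cmd.CommandText = $"DELETE FROM dat WHERE hash IN ({string.Join(", ", names)})";
        cmd.ExecuteNonQuery();
    }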