static void ProcessClusterHierarchy(OverlapRemovalCluster root, ClusterDelegate worker)
        {
            var stack = new Stack<ClusterItem>();
            stack.Push(new ClusterItem(root));
            while (stack.Count > 0)
            {
                // Keep the cluster on the stack until we're done with its children.
                var item = stack.Peek();
                int prevStackCount = stack.Count;
                if (!item.ChildrenHaveBeenPushed)
                {
                    item.ChildrenHaveBeenPushed = true;
                    foreach (var childCluster in item.Cluster.Clusters)
                    {
                        stack.Push(new ClusterItem(childCluster));
                    }
                    if (stack.Count > prevStackCount)
                    {
                        continue;
                    }
                } // endif !item.ChildrenHaveBeenPushed

                // No children to push so pop and process this cluster.
                Debug.Assert(stack.Peek() == item, "stack.Peek() should be 'item'");
                stack.Pop();
                worker(item.Cluster);
            }
        }
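The traversal above keeps each cluster on the stack until all of its children have been processed, which yields a post-order walk without recursion. It relies on a small ClusterItem wrapper that the snippet does not include; here is a minimal sketch of what it might look like, inferred purely from the usage above (the type and its members are assumptions, not the library's actual code):

        // Hypothetical helper: pairs a cluster with a flag recording whether
        // its children have already been pushed onto the traversal stack.
        private class ClusterItem
        {
            internal readonly OverlapRemovalCluster Cluster;
            internal bool ChildrenHaveBeenPushed;

            internal ClusterItem(OverlapRemovalCluster cluster)
            {
                Cluster = cluster;
                ChildrenHaveBeenPushed = false;
            }
        }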
Example #2
        public int Carve()
        {
            int             filesRestored = 0;
            RootDirectory   r;
            // Maps an absolute sector number to its cluster number in the FAT.
            ClusterDelegate getClusterNo = delegate(int sector)
            { return(((sector - DataRegion_start_sector) / SectorsPerCluster) + Constants.START_CLUSTER); };

            List <RootDirectory> root_entries = new List <RootDirectory>();

            uint
                start_cluster_for_file = Constants.NULLED,
                fileSize = Constants.NULLED,
                start_sector_for_file = Constants.NULLED;
            bool foundHeader          = false;


            /*
             * Read all root directory entries
             */
            for (int j = 0; j < RootDirectory_size; j++)
            {
                byte[][] RootDirectoryEntries = split(getSector(j + RootDirectory_start_sector), Constants.RD_ENTRY_LENGTH);
                for (int i = 0; i < RootDirectoryEntries.Length; i++)
                {
                    if (!RootDirectoryEntries[i].All(singleByte => singleByte == 0))
                    {
                        root_entries.Add(new RootDirectory(RootDirectoryEntries[i]));
                    }
                }
            }

            /*
             * This for-loop reads each cluster in the data region and
             * changes the FAT region (as necessary).
             */
            for (int sector = DataRegion_start_sector; sector < data.Length / BytesPerSector && sector < ushort.MaxValue; sector++)
            {
                /*
                 * Prepare yourself for unnecessary uint and int casts
                 * and cluster and sector conversions.
                 */
                int type = HeaderChecker.checkClusterForFooterAndHeader(getSector(sector), ref fileSize, ref fileSize);

                switch (type)
                {
                case (int)headerFooter.GIFHeader:
                case (int)headerFooter.JPGHeader:
                case (int)headerFooter.PNGHeader:
                    if (foundHeader)
                    {
                        throw new Exception("Found a new header before the previous file's footer.");
                    }
                    else
                    {
                        foundHeader = true;
                    }
                    start_sector_for_file = (uint)sector;
                    break;

                case (int)headerFooter.BMPHeader:
                    if (foundHeader)
                    {
                        throw new Exception("Found a BMP header while another file was still open.");
                    }
                    start_cluster_for_file = (uint)getClusterNo(sector);

                    if (changeFATTable(start_cluster_for_file, (uint)(Math.Ceiling((double)fileSize / bytesPerCluster))))
                    {
                        r               = new RootDirectory();
                        r.FileName      = (++filesRestored).ToString();
                        r.FileExtension = HeaderChecker.getExtension(type);
                        r.byteSize      = fileSize;
                        r.start_cluster = (ushort)start_cluster_for_file;
                        root_entries.Add(r);
                    }
                    break;

                case (int)headerFooter.GIFFooter:
                case (int)headerFooter.JPGFooter:
                case (int)headerFooter.PNGFooter:
                    if (!foundHeader)
                    {
                        throw new Exception("Found a footer without a matching header.");
                    }
                    else
                    {
                        foundHeader = false;
                    }
                    uint file_cluster_length = (uint)(getClusterNo(sector) - getClusterNo((int)start_sector_for_file)) + 1;

                    /*
                     * Oh my god, what have I created.
                     */
                    if (changeFATTable((uint)getClusterNo((int)start_sector_for_file), file_cluster_length))
                    {
                        r               = new RootDirectory();
                        r.FileName      = (++filesRestored).ToString();
                        r.FileExtension = HeaderChecker.getExtension(type);

                        int lastSectorLength = RemoveTrailingZeros(getSector(sector)).Length;
                        r.byteSize = (uint)(lastSectorLength + ((sector - start_sector_for_file) * BytesPerSector));

                        r.start_cluster = (ushort)getClusterNo((int)start_sector_for_file);
                        root_entries.Add(r);
                    }
                    break;

                case (int)headerFooter.Invalid:
                    break;

                default:
                    throw new Exception("Unrecognised header/footer type: " + type);
                }
            }



            if (filesRestored != 0)
            {
                //Code for reading the current root directories can also be placed here.

                /*
                 * Get the Root directory data in terms of sector byte data
                 */
                List <byte[]> dataToWrite = new List <byte[]>();

                foreach (RootDirectory rr in root_entries)
                {
                    dataToWrite.Add(rr.ByteData);
                }

                int k = 0;
                foreach (byte[] b in dataToWrite)
                {
                    foreach (byte bb in b)
                    {
                        data[RootDirectory_start_sector * BytesPerSector + k++] = bb;
                    }
                }
            }

            return(filesRestored);
        }
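Carve() depends on several helpers that this snippet does not show: getSector, split, RemoveTrailingZeros, changeFATTable, and the geometry fields (BytesPerSector, SectorsPerCluster, and so on). As a concrete check of the sector-to-cluster arithmetic in getClusterNo: assuming DataRegion_start_sector = 33, SectorsPerCluster = 2, and Constants.START_CLUSTER = 2, sector 37 maps to cluster ((37 - 33) / 2) + 2 = 4. Below is a minimal sketch of the two reading helpers under those assumed field names; it illustrates the likely behaviour, not the original implementation:

        // Hypothetical sketch: copy one sector's worth of bytes out of the raw image.
        private byte[] getSector(int sector)
        {
            byte[] result = new byte[BytesPerSector];
            Array.Copy(data, sector * BytesPerSector, result, 0, BytesPerSector);
            return result;
        }

        // Hypothetical sketch: slice a sector into fixed-length root-directory entries.
        private byte[][] split(byte[] sector, int entryLength)
        {
            var entries = new byte[sector.Length / entryLength][];
            for (int i = 0; i < entries.Length; i++)
            {
                entries[i] = new byte[entryLength];
                Array.Copy(sector, i * entryLength, entries[i], 0, entryLength);
            }
            return entries;
        }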
Example #3
        /// <summary>
        /// Calculate metrics for a single cluster of sequences (all stored sequences),
        /// and write metric data to file(s) if required.
        /// </summary>
        public void ProcessSequences()
        {
            bool isGood = true;
            if (allSequences != null && allSequences.Count > 0 && !isComplete) // do the following only if there are sequences to be processed
            {
                clusterCount++;
                ClusterMetric metric = new ClusterMetric(expectedPloidy, numSamples);

                // Initialise metric output file(s)
                InitMetricOutputFiles();

                // Initialise BAM output file(s)
                InitBamOutputFiles();

                // Perform core metric calculations on cluster sequences
                metric.Calculate(allSequences);
                isGood = GoodOrBad(metric);

                // Get haplotype information
                if(haplotypingEnabled && expectedPloidy == 2)
                {
                    GetHaplotypeInfo(ref metric, ref isGood);
                }
                if(isGood) { ++goodCount; }
                Console.WriteLine(metric.ToString() + "\t" + (isGood ? Properties.Resources.GOOD_CLUSTER : Properties.Resources.BAD_CLUSTER));

                // Get statistics from the metric for this new cluster
                CreateSummaryArrays(metric, isGood);
                SetOverviewStats(metric, isGood);

                // Output sequences to metric file(s) and/or the filtered BAM file
                WriteToMetricOutputFiles(metric, isGood);
                AddToOutputBamQueueOrDispose(metric, isGood);

                // If the bam file is not currently being written to, and there are sequences in the queue ready to be
                // written, launch a new thread to perform the writing to file
                if (writeToFilteredBam && canWriteToBam && bamOutputQueue.Count > 0)
                {
                    canWriteToBam = false;
                    ClusterDelegate runner = new ClusterDelegate(WriteToBam);
                    runner.BeginInvoke(null, null);
                }
            }

            // Now that all processing has been performed for the current cluster, if the handler has been
            // aborted, perform any final file outputs
            if(aborted)
            {
                SetComplete(false);
            }
        }
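The runner.BeginInvoke(null, null) call above is the classic fire-and-forget form of asynchronous delegate invocation (the APM pattern), which implies that ClusterDelegate is declared as a parameterless void delegate in this codebase. A minimal standalone sketch of the same pattern (all names here are hypothetical, and note that delegate BeginInvoke works only on .NET Framework; it throws PlatformNotSupportedException on .NET Core):

using System;
using System.Threading;

delegate void ClusterDelegate();

class FireAndForgetDemo
{
    static void WriteToBam()
    {
        // Stand-in for the real writer; runs on a thread-pool thread.
        Console.WriteLine("writing on thread " + Thread.CurrentThread.ManagedThreadId);
    }

    static void Main()
    {
        ClusterDelegate runner = new ClusterDelegate(WriteToBam);
        // Fire and forget: no completion callback, no state object. Without a
        // matching EndInvoke, any exception thrown by WriteToBam is lost.
        runner.BeginInvoke(null, null);
        Console.ReadLine(); // keep the process alive long enough to see the output
    }
}

In newer code the same effect is usually achieved with Task.Run(WriteToBam), which works on all .NET runtimes.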