static void ExtractHfs(string filename)
        {
            string basep = Path.GetDirectoryName(filename);
            string plain = Path.GetFileNameWithoutExtension(filename);

            // Build the target path once with Path.Combine: naive concatenation
            // (basep + @"\" + name) produces a rooted "\x_.zip" when filename has
            // no directory component (basep == "") and is not portable across
            // platform path separators.
            string zipPath = Path.Combine(basep, plain + "_.zip");

            using (HfsFile hfs = new HfsFile(filename))
            using (ZipFile zip = ZipFile.Create(zipPath))
            {
                zip.BeginUpdate();

                foreach (HfsEntry hfsEntry in hfs)
                {
                    Console.WriteLine("Processing " + hfsEntry.Name);

                    try
                    {
                        Stream read = hfs.GetInputStream(hfsEntry);

                        zip.Add(new StreamLocalSourceZip(read), hfsEntry.Name);
                    }
                    catch (Exception e)
                    {
                        // Best effort: report the failed entry and keep extracting the rest.
                        Console.WriteLine("Couldn't process " + hfsEntry.Name + ": " + e.Message);
                    }
                }

                // Tag archives that came from an obfuscated container.
                if (hfs.ObfuscationKey != 0)
                {
                    zip.SetComment("extra_obscure");
                }

                Console.WriteLine("Compressing..");
                zip.CommitUpdate();
            }

            Console.WriteLine("Wrote to " + zipPath);
        }
        /// <summary>
        /// Create a new <see cref="HfsFile"/> whose data will be stored on a stream.
        /// </summary>
        /// <param name="outStream">The stream providing data storage.</param>
        /// <returns>Returns the newly created <see cref="HfsFile"/></returns>
        /// <exception cref="ArgumentNullException"><paramref name="outStream"/> is null.</exception>
        /// <exception cref="ArgumentException"><paramref name="outStream"/> does not support writing or seeking.</exception>
        public static HfsFile Create(Stream outStream)
        {
            if (outStream == null)
            {
                throw new ArgumentNullException("outStream");
            }

            if (!outStream.CanWrite)
            {
                throw new ArgumentException("Stream is not writeable", "outStream");
            }

            // Seeking is required so headers can be rewritten after the data is known.
            if (!outStream.CanSeek)
            {
                throw new ArgumentException("Stream is not seekable", "outStream");
            }

            HfsFile created = new HfsFile();
            created.baseStream_ = outStream;
            return created;
        }
        /// <summary>
        /// Create a new <see cref="HfsFile"/> whose data will be stored in a file.
        /// </summary>
        /// <param name="fileName">The name of the archive to create.</param>
        /// <returns>Returns the newly created <see cref="HfsFile"/></returns>
        /// <exception cref="ArgumentNullException"><paramref name="fileName"/> is null.</exception>
        public static HfsFile Create(string fileName)
        {
            if (fileName == null)
            {
                throw new ArgumentNullException("fileName");
            }

            // Create (or truncate) the backing file; the HfsFile takes ownership
            // of the stream and will dispose it when closed.
            FileStream backing = File.Create(fileName);

            HfsFile created = new HfsFile();
            created.name_ = fileName;
            created.baseStream_ = backing;
            created.isStreamOwner = true;
            return created;
        }
 /// <summary>
 /// Initialise a new instance of <see cref="TestStatus"/>
 /// </summary>
 /// <param name="file">The <see cref="HfsFile"/> this status applies to.</param>
 public TestStatus(HfsFile file)
 {
     // Only the reference is stored; no validation is performed here.
     this.file_ = file;
 }
            /// <summary>
            /// Initialise a new instance of the <see cref="PartialInputStream"/> class.
            /// </summary>
            /// <param name="HfsFile">The <see cref="HfsFile"/> containing the underlying stream to use for IO.</param>
            /// <param name="start">The start of the partial data.</param>
            /// <param name="length">The length of the partial data.</param>
            public PartialInputStream(HfsFile HfsFile, long start, long length)
            {
                // Hold a reference to the owning HfsFile for the lifetime of this
                // stream.  Although this constructor is the only place the HfsFile
                // is used directly, dropping the reference would let the garbage
                // collector finalise the HfsFile (and thus close baseStream_)
                // while callers are still reading, causing apparently random
                // failures depending on file sizes and GC timing, e.g.:
                //
                //   HfsFile z = new HfsFile(stream);
                //   Stream reader = z.GetInputStream(0);
                //   // ... z is unreachable here and may be collected ...
                HfsFile_ = HfsFile;
                baseStream_ = HfsFile_.baseStream_;

                start_ = start;
                length_ = length;
                readPos_ = start;
                end_ = start + length;
            }
 /// <summary>
 /// Initializes a new instance of the <see cref="DiskArchiveStorage"/> class,
 /// using the default <see cref="FileUpdateMode.Safe"/> update mode.
 /// </summary>
 /// <param name="file">The file.</param>
 public DiskArchiveStorage(HfsFile file)
     : this(file, FileUpdateMode.Safe)
 {
 }
        /// <summary>
        /// Initializes a new instance of the <see cref="DiskArchiveStorage"/> class.
        /// </summary>
        /// <param name="file">The file.</param>
        /// <param name="updateMode">The update mode.</param>
        /// <exception cref="ArgumentNullException"><paramref name="file"/> is null.</exception>
        /// <exception cref="HfsException">The archive is not file backed (its Name is null).</exception>
        public DiskArchiveStorage(HfsFile file, FileUpdateMode updateMode)
            : base(updateMode)
        {
            // Guard explicitly rather than failing with a NullReferenceException
            // on the file.Name access below.
            if (file == null)
            {
                throw new ArgumentNullException("file");
            }

            if (file.Name == null)
            {
                throw new HfsException("Cant handle non file archives");
            }

            fileName_ = file.Name;
        }
        /// <summary>
        /// Write a modified entry to <paramref name="workFile"/>: emit its local
        /// header, then (for file entries with a known source) recompress the data
        /// and record the resulting compressed size on the entry.
        /// </summary>
        void ModifyEntry(HfsFile workFile, HfsUpdate update)
        {
            workFile.WriteLocalEntryHeader(update);
            long startOfData = workFile.baseStream_.Position;

            // NOTE: the data is recompressed even when only metadata changed,
            // which is slow (original TODO).
            if (update.Entry.IsFile && (update.Filename != null))
            {
                using (Stream output = workFile.GetOutputStream(update.OutEntry))
                using (Stream source = this.GetInputStream(update.Entry))
                {
                    CopyBytes(update, output, source, source.Length, true);
                }
            }

            long endOfData = workFile.baseStream_.Position;
            update.Entry.CompressedSize = endOfData - startOfData;
        }
        /// <summary>
        /// Copy an unchanged entry directly within the archive being updated in place.
        /// If the entry already sits at <paramref name="destinationPosition"/> only the
        /// position bookkeeping is advanced; otherwise the local header is rewritten at
        /// the destination and the entry data and descriptor are moved there.
        /// </summary>
        /// <param name="workFile">The output <see cref="HfsFile"/> being written.</param>
        /// <param name="update">The update describing the entry to copy.</param>
        /// <param name="destinationPosition">Position in the output stream; advanced past the copied entry.</param>
        void CopyEntryDirect(HfsFile workFile, HfsUpdate update, ref long destinationPosition)
        {
            // When the entry is already where it needs to be, no bytes move at all.
            bool skipOver = false;
            if (update.Entry.Offset == destinationPosition)
            {
                skipOver = true;
            }

            if (!skipOver)
            {
                baseStream_.Position = destinationPosition;
                workFile.WriteLocalEntryHeader(update);
                destinationPosition = baseStream_.Position;
            }

            long sourcePosition = 0;

            // Byte offset from the start of a local entry header to its
            // name-length field (fixed-size header portion).
            const int NameLengthOffset = 26;

            // TODO: Add base for SFX friendly handling
            long entryDataOffset = update.Entry.Offset + NameLengthOffset;

            baseStream_.Seek(entryDataOffset, SeekOrigin.Begin);

            // Clumsy way of handling retrieving the original name and extra data length for now.
            // TODO: Stop re-reading name and data length in CopyEntryDirect.
            // NOTE: these two reads must happen in this order - the name length
            // field immediately precedes the extra-data length field.
            uint nameLength = ReadLEUshort();
            uint extraLength = ReadLEUshort();

            // Start of the entry's actual data, past the variable-length name/extra fields.
            sourcePosition = baseStream_.Position + nameLength + extraLength;

            if (skipOver)
            {
                if (update.OffsetBasedSize != -1)
                    destinationPosition += update.OffsetBasedSize;
                else
                    // TODO: Find out why this calculation comes up 4 bytes short on some entries in ODT (Office Document Text) archives.
                    // WinHfs produces a warning on these entries:
                    // "caution: value of lrec.csize (compressed size) changed from ..."
                    destinationPosition +=
                        (sourcePosition - entryDataOffset) + NameLengthOffset +	// Header size
                        update.Entry.CompressedSize + GetDescriptorSize(update);
            }
            else
            {
                if (update.Entry.CompressedSize > 0)
                {
                    CopyEntryDataDirect(update, baseStream_, false, ref destinationPosition, ref sourcePosition);
                }
                CopyDescriptorBytesDirect(update, baseStream_, ref destinationPosition, sourcePosition);
            }
        }
        /// <summary>
        /// Copy an unchanged entry from the source archive into <paramref name="workFile"/>:
        /// write its local header, copy the compressed data verbatim, then copy any
        /// trailing descriptor bytes.
        /// </summary>
        void CopyEntry(HfsFile workFile, HfsUpdate update)
        {
            workFile.WriteLocalEntryHeader(update);

            if (update.Entry.CompressedSize > 0)
            {
                // Offset from the start of a local entry header to its name-length field.
                const int NameLengthOffset = 26;

                long headerFieldsOffset = update.Entry.Offset + NameLengthOffset;

                // TODO: This wont work for SFX files!
                baseStream_.Seek(headerFieldsOffset, SeekOrigin.Begin);

                // Read the two length fields, then skip past the variable-length
                // name and extra data to reach the start of the entry data.
                uint nameLen = ReadLEUshort();
                uint extraLen = ReadLEUshort();
                baseStream_.Seek(nameLen + extraLen, SeekOrigin.Current);

                CopyBytes(update, workFile.baseStream_, baseStream_, update.Entry.CompressedSize, false);
            }

            CopyDescriptorBytes(update, workFile.baseStream_, baseStream_);
        }
        /// <summary>
        /// Add a new entry to <paramref name="workFile"/>: resolve the data source,
        /// validate/record the uncompressed size, write the local header, compress
        /// the data through the output stream, and record the resulting sizes.
        /// Entries with no data source get a header with zero compressed size.
        /// </summary>
        /// <param name="workFile">The output <see cref="HfsFile"/> being written.</param>
        /// <param name="update">The update describing the entry to add.</param>
        void AddEntry(HfsFile workFile, HfsUpdate update)
        {
            Stream source = null;

            if (update.Entry.IsFile)
            {
                // Prefer a source attached to the update itself, falling back to
                // the shared data source keyed by entry/filename.
                source = update.GetSource();

                if (source == null)
                {
                    source = updateDataSource_.GetSource(update.Entry, update.Filename);
                }
            }

            if (source != null)
            {
                using (source)
                {
                    long sourceStreamLength = source.Length;
                    if (update.OutEntry.Size < 0)
                    {
                        update.OutEntry.Size = sourceStreamLength;
                    }
                    else
                    {
                        // Check for errant entries.
                        if (update.OutEntry.Size != sourceStreamLength)
                        {
                            throw new HfsException("Entry size/stream size mismatch");
                        }
                    }

                    workFile.WriteLocalEntryHeader(update);

                    long dataStart = workFile.baseStream_.Position;
                    update.OutEntry.DataOffset = dataStart;

                    using (Stream output = workFile.GetOutputStream(update.OutEntry))
                    {
                        CopyBytes(update, output, source, sourceStreamLength, true);
                    }

                    long dataEnd = workFile.baseStream_.Position;

                    // NOTE(review): this also overwrites OutEntry.Size with the
                    // COMPRESSED byte count, discarding the uncompressed size that
                    // was validated above.  Looks suspicious - confirm whether only
                    // CompressedSize should be assigned here.
                    update.OutEntry.CompressedSize = update.OutEntry.Size = dataEnd - dataStart;

                }
            }
            else
            {
                // No data source: emit a header-only entry.
                workFile.WriteLocalEntryHeader(update);
                update.OutEntry.CompressedSize = 0;
            }
        }