/// <summary>
/// Zeroes the FAT entry of every cluster in the supplied chain, freeing it.
/// FAT sectors are cached in 0x1000-byte windows and flushed back to the
/// drive only when the chain crosses into a different window (or ends).
/// </summary>
/// <param name="Chain">Cluster (block) numbers whose FAT entries are cleared.</param>
public void ClearFATChain(uint[] Chain)
{
    Streams.Reader reader = Parent.Drive.Reader();
    Streams.Writer writer = new CLKsFATXLib.Streams.Writer(reader.BaseStream);
    long windowLength = 0x1000;
    long cachedOffset = 0;
    byte[] window = new byte[windowLength];
    for (int index = 0; index < Chain.Length; index++)
    {
        long entryOffset = VariousFunctions.BlockToFATOffset(Chain[index], Parent);
        long windowOffset = entryOffset.DownToNearestCluster(0x1000);
        // Crossed into a different FAT window: flush the previous (modified)
        // window first, then load the new one from the drive.
        if (cachedOffset != windowOffset)
        {
            if (index != 0)
            {
                writer.BaseStream.Position = cachedOffset;
                writer.Write(window);
            }
            cachedOffset = windowOffset;
            reader.BaseStream.Position = cachedOffset;
            window = reader.ReadBytes((int)windowLength);
        }
        // Clear this chain entry inside the in-memory window; a MemoryStream
        // constructed over the array writes straight into it.
        Streams.Writer patcher = new CLKsFATXLib.Streams.Writer(new System.IO.MemoryStream(window));
        patcher.BaseStream.Position = entryOffset - windowOffset;
        byte[] cleared;
        if (Parent.PartitionInfo.EntrySize == 2)
        {
            cleared = BitConverter.GetBytes((ushort)0);
        }
        else if (Parent.PartitionInfo.EntrySize == 4)
        {
            cleared = BitConverter.GetBytes(0);
        }
        else
        {
            cleared = new byte[0];
        }
        patcher.Write(cleared);
        patcher.Close();
        // Last entry processed: flush whatever window we are holding.
        if (index == Chain.Length - 1)
        {
            writer.BaseStream.Position = cachedOffset;
            writer.Write(window);
        }
    }
}
/// <summary>
/// Builds a brand-new directory entry in the given folder, stamps it with
/// the current FAT date/time, allocates a starting cluster when one is
/// needed, and writes the entry to disk.
/// </summary>
/// <param name="Destination">Folder that receives the new entry.</param>
/// <param name="Size">File size in bytes (0 for directories).</param>
/// <param name="Flags">Entry flags; empty for a plain file.</param>
/// <param name="EntryName">Name for the new entry.</param>
/// <returns>The fully populated entry that was written.</returns>
/// <exception cref="ArgumentException">Thrown when the name fails validation.</exception>
public EntryData GetNewEntry(Folder Destination, uint Size, Geometry.Flags[] Flags, string EntryName)
{
    if (!VariousFunctions.CheckFileName(EntryName))
    {
        throw new ArgumentException("Invalid name: \"" + EntryName + "\"", "EntryName");
    }
    EntryData entry = new EntryData();
    entry.EntryOffset = GetNewEntryOffset(Destination);
    // Stamp creation/modified/access with the same FAT-encoded date and time.
    ushort fatDate = VariousFunctions.DateTimeToFATShort(DateTime.Now, true);
    ushort fatTime = VariousFunctions.DateTimeToFATShort(DateTime.Now, false);
    entry.CreationDate = fatDate;
    entry.CreationTime = fatTime;
    entry.ModifiedDate = fatDate;
    entry.ModifiedTime = fatTime;
    entry.AccessDate = fatDate;
    entry.AccessTime = fatTime;
    if (Flags.Length != 0)
    {
        entry.Flags = VariousFunctions.FlagsToByte(Flags);
    }
    else
    {
        entry.Flags = 0;
    }
    entry.Size = Size;
    entry.Name = EntryName;
    entry.NameSize = (byte)EntryName.Length;
    // Directories (size 0 + directory flag) and plain files (non-zero size,
    // no flags) get a free cluster; anything else starts unallocated at 0.
    bool allocateCluster = (Size == 0 && Flags.Contains(Geometry.Flags.Directory)) || (Size != 0 && Flags.Length == 0);
    if (allocateCluster)
    {
        entry.StartingCluster = Destination.Drive.GetFreeBlocks(Destination, 1, 0, 0, false)[0];
    }
    else
    {
        entry.StartingCluster = 0;
    }
    WriteFATChain(new uint[] { entry.StartingCluster });
    CreateNewEntry(entry);
    return entry;
}
/// <summary>
/// Finds the drive offset where a new 0x40-byte directory entry for
/// <paramref name="Destination"/> should be written, appending a fresh
/// cluster to the folder when its last cluster is already full.
/// </summary>
/// <param name="Destination">Folder that will receive the new entry.</param>
/// <returns>Absolute drive offset for the new entry.</returns>
public long GetNewEntryOffset(Folder Destination)
{
    // Only the folder's last cluster can have free slots, so skip the rest.
    EntryData[] Entries = EntryDataFromBlock(Destination.BlocksOccupied[Destination.BlocksOccupied.Length - 1]);
    if (Entries.Length == 0)
    {
        return(Destination.StartingOffset);
    }
    // If there isn't the maximum amount of entries for a cluster...
    if (Entries.Length < Destination.PartitionInfo.ClusterSize / 0x40)
    {
        // Prefer recycling a deleted entry's slot (name size 0xE5 = deleted).
        foreach (EntryData e in Entries)
        {
            if (e.NameSize == 0xE5)
            {
                return(e.EntryOffset);
            }
        }
        // Otherwise append directly after the last live entry.
        return(Entries[Entries.Length - 1].EntryOffset + 0x40);
    }
    // Max amount of entries, let's add another cluster to our parent...
    else
    {
        uint[] NewBlocks = new uint[Destination.BlocksOccupied.Length + 1];
        Array.Copy(Destination.BlocksOccupied, NewBlocks, Destination.BlocksOccupied.Length);
        NewBlocks[NewBlocks.Length - 1] = Destination.Drive.GetFreeBlocks((Folder)Destination, 1, 0, 0, false)[0];
        // A fresh directory cluster is initialized to all 0xFF ("no entry").
        byte[] FF = new byte[Destination.PartitionInfo.ClusterSize];
        for (int i = 0; i < FF.Length; i++)
        {
            FF[i] = 0xFF;
        }
        Streams.Writer w = Destination.Drive.Writer();
        // BUG FIX: the fill (and the returned entry offset) must target the
        // cluster's DATA offset (GetBlockOffset, as EntryDataFromBlock reads
        // it), not its FAT-entry offset; writing ClusterSize bytes of 0xFF at
        // the FAT offset would corrupt the allocation table.
        w.BaseStream.Position = VariousFunctions.GetBlockOffset(NewBlocks[NewBlocks.Length - 1], Destination);
        w.Write(FF);
        Destination.BlocksOccupied = NewBlocks;
        return(VariousFunctions.GetBlockOffset(NewBlocks[NewBlocks.Length - 1], Destination));
    }
}
/// <summary>
/// Creates (or reuses) a stream to the current drive.
/// </summary>
/// <returns>Stream for the current drive.</returns>
public System.IO.Stream Stream()
{
    // Hand back the cached stream when it is still usable; otherwise
    // rebuild it for whichever kind of device this drive represents.
    if (thisStream == null || IsClosed)
    {
        if (DriveType == DriveType.Backup)
        {
            thisStream = new System.IO.FileStream(FilePath, System.IO.FileMode.Open);
        }
        else if (DriveType == DriveType.HardDisk)
        {
            // Physical disks need a fresh OS handle before a stream can wrap it.
            DeviceHandle = VariousFunctions.CreateHandle(DeviceIndex);
            thisStream = new System.IO.FileStream(DeviceHandle, System.IO.FileAccess.ReadWrite);
        }
        else if (DriveType == DriveType.USB)
        {
            thisStream = new Streams.USBStream(USBPaths, System.IO.FileMode.Open);
        }
    }
    return thisStream;
}
/// <summary>
/// Decodes the FAT date/time pair packed into a partition's ID field:
/// the high word of the ID is treated as the FAT date, the low word as
/// the FAT time.
/// </summary>
/// <param name="PI">Partition whose ID holds the packed timestamp.</param>
/// <returns>The decoded timestamp.</returns>
/// <remarks>
/// NOTE(review): the date word is extracted with (PI.ID &amp; ~0xFFFF) &gt;&gt; 8
/// and then truncated to ushort — a &gt;&gt; 16 shift looks like what was
/// intended; confirm against the on-disk format before changing.
/// </remarks>
public DateTime PartitionTimeStamp(Structs.PartitionInfo PI)
{
    return(VariousFunctions.DateTimeFromFATInt((ushort)((PI.ID & ~0xFFFF) >> 8), (ushort)PI.ID));
}
/// <summary>
/// Returns an array of free blocks based off of the number of blocks needed.
/// Scans the partition's FAT in 0x10000-byte chunks starting at
/// <paramref name="StartBlock"/>; a FAT entry whose bytes are all zero marks
/// a free block.
/// </summary>
/// <param name="Partition">Folder whose partition's FAT is scanned.</param>
/// <param name="blocksNeeded">How many free blocks to collect.</param>
/// <param name="StartBlock">Block number to begin scanning from.</param>
/// <param name="end">FAT offset to stop at; 0 means scan to the end of the FAT.</param>
/// <param name="SecondLoop">True on the wrap-around pass, to stop recursion.</param>
/// <returns>Exactly <paramref name="blocksNeeded"/> free block numbers.</returns>
/// <exception cref="Exception">Thrown when the disk is out of free blocks.</exception>
public uint[] GetFreeBlocks(Folder Partition, int blocksNeeded, uint StartBlock, long end, bool SecondLoop)
{
    int Clustersize = 0x10000;
    uint Block = StartBlock;
    if (end == 0)
    {
        end = Partition.PartitionInfo.FATOffset + Partition.PartitionInfo.FATSize;
    }
    List<uint> BlockList = new List<uint>();
    // Create our reader for the drive
    Streams.Reader br = Reader();
    // Create our reader for the memory stream
    Streams.Reader mr = null;
    for (long i = VariousFunctions.DownToNearest200(VariousFunctions.BlockToFATOffset(StartBlock, Partition)); i < end; i += Clustersize)
    {
        //Set our position to i
        br.BaseStream.Position = i;
        byte[] buffer = new byte[0];
        if ((end - i) < Clustersize)
        {
            // Final partial chunk before the end offset.
            buffer = VariousFunctions.ReadBytes(ref br, end - i);
        }
        else
        {
            //Read our buffer
            buffer = br.ReadBytes(Clustersize);
        }
        // BUG FIX: the old code wrapped this Close() in an empty try/catch to
        // paper over the NullReferenceException thrown on the first pass
        // (mr starts out null); test explicitly instead of swallowing it.
        if (mr != null)
        {
            mr.Close();
        }
        //Re-open our binary reader using the buffer/memory stream
        mr = new Streams.Reader(new System.IO.MemoryStream(buffer));
        for (int j = 0; j < buffer.Length; j += (int)Partition.PartitionInfo.EntrySize, Block += (uint)Partition.PartitionInfo.EntrySize)
        {
            mr.BaseStream.Position = j;
            //If we've gotten all of our requested blocks...
            if (BlockList.Count == blocksNeeded)
            {
                //Close our reader -> break the loop
                mr.Close();
                break;
            }
            //Read the next block entry
            byte[] reading = mr.ReadBytes((int)Partition.PartitionInfo.EntrySize);
            //For each byte in our reading
            for (int k = 0; k < reading.Length; k++)
            {
                //If the byte isn't null (if the block isn't open)
                if (reading[k] != 0x00)
                {
                    break;
                }
                //If we've reached the end of the array, and the last byte
                //is 0x00, then the block is free
                if (k == reading.Length - 1 && reading[k] == 0x00)
                {
                    //Convert this FAT-entry position back into a block number
                    long fOff = Partition.PartitionInfo.FATOffset;
                    long blockPosition = (long)i + j;
                    uint block = (uint)(blockPosition - fOff) / (uint)Partition.PartitionInfo.EntrySize;
                    BlockList.Add(block);
                }
            }
        }
        //We're putting in one last check so that we don't loop more than we need to
        if (BlockList.Count == blocksNeeded)
        {
            break;
        }
    }
    //If we found the required amount of free blocks - return our list
    if (BlockList.Count == blocksNeeded)
    {
        return(BlockList.ToArray());
    }
    //If we didn't find the amount of blocks required, but we started from a
    //block other than the first one, wrap around and scan the earlier region
    if (BlockList.Count < blocksNeeded && SecondLoop == false)
    {
        BlockList.AddRange(GetFreeBlocks(Partition, blocksNeeded - BlockList.Count, 1, VariousFunctions.DownToNearest200(VariousFunctions.BlockToFATOffset(StartBlock, Partition)), true));
        return(BlockList.ToArray());
    }
    //We didn't find the amount of free blocks required, meaning we're out of
    //disk space
    if (BlockList.Count != blocksNeeded)
    {
        throw new Exception("Out of Xbox 360 hard disk space");
    }
    return(BlockList.ToArray());
}
/// <summary>
/// Walks the FAT chain starting at the parent entry's StartingCluster and
/// returns every block the file/folder occupies, in chain order.
/// </summary>
/// <returns>All blocks in the chain, beginning with the starting cluster.</returns>
/// <exception cref="Exception">
/// Thrown when a chain entry reads back as 0 (broken chain); before throwing,
/// the parent entry is rewritten as deleted (name size 0xE5) and the partial
/// chain collected so far is cleared.
/// </exception>
public uint[] GetBlocksOccupied()
{
    List<uint> Blocks = new List<uint>();
    Streams.Reader r = Parent.Drive.Reader();
    Blocks.Add(Parent.StartingCluster);
    byte[] Buffer = new byte[0x1000];
    int buffersize = 0x1000;
    long lastoffset = 0;
    // Note: Blocks grows while we iterate it — each pass reads the FAT entry
    // for Blocks[i] and appends the next block it points to, until an
    // end-of-chain marker is hit.
    for (int i = 0; i < Blocks.Count; i++)
    {
        r.BaseStream.Position = VariousFunctions.BlockToFATOffset(Blocks[i], Parent).DownToNearestCluster(0x1000);
        // We use this so that we aren't reading the same buffer
        // a zillion times
        if (r.BaseStream.Position != lastoffset)
        {
            lastoffset = r.BaseStream.Position;
            Buffer = r.ReadBytes(buffersize);
        }
        Streams.Reader r1 = new CLKsFATXLib.Streams.Reader(new System.IO.MemoryStream(Buffer));
        // Position of this block's FAT entry inside the cached 0x1000 window.
        int OffsetInBuffer = (int)(VariousFunctions.BlockToFATOffset(Blocks[i], Parent) - VariousFunctions.BlockToFATOffset(Blocks[i], Parent).DownToNearestCluster(0x1000));
        r1.BaseStream.Position = OffsetInBuffer;
        // FAT16 partitions use 2-byte entries, FAT32 4-byte entries.
        switch (Parent.PartitionInfo.EntrySize)
        {
            case 2:
                ushort Value = r1.ReadUInt16();
                // 0xFFFF / 0xFFF8 appear to be end-of-chain markers — the
                // chain simply stops there. TODO(review): confirm 0xFFF8.
                if (Value != 0xFFFF && Value != 0xFFF8)
                {
                    if (Value == 0)
                    {
                        // Chain points at a free block: mark the entry deleted,
                        // free what we collected, and bail out loudly.
                        EntryData ed = Parent.EntryData;
                        ed.NameSize = 0xE5;
                        CreateNewEntry(ed);
                        if (Blocks.Count > 0)
                        {
                            ClearFATChain(Blocks.ToArray());
                        }
                        throw new Exception(string.Format("Bad FAT chain in file or folder {0}\r\nEntry Offset: 0x{1}\r\nLast block in FAT: 0x{2}\r\nEntry marked as deleted to avoid further errors! Please reload this device", Parent.FullPath, Parent.EntryOffset.ToString("X"), Blocks.Last().ToString("X")));
                    }
                    Blocks.Add(Value);
                }
                break;
            case 4:
                uint Value2 = r1.ReadUInt32();
                if (Value2 != 0xFFFFFFFF && Value2 != 0xFFFFFFF8)
                {
                    if (Value2 == 0)
                    {
                        // Same broken-chain handling as the 2-byte case.
                        EntryData ed = Parent.EntryData;
                        ed.NameSize = 0xE5;
                        CreateNewEntry(ed);
                        if (Blocks.Count > 0)
                        {
                            ClearFATChain(Blocks.ToArray());
                        }
                        throw new Exception(string.Format("Bad FAT chain in file or folder {0}\r\nEntry Offset: 0x{1}\r\nLast block in FAT: 0x{2}\r\nEntry marked as deleted to avoid further errors! Please reload this device", Parent.FullPath, Parent.EntryOffset.ToString("X"), Blocks.Last().ToString("X")));
                    }
                    Blocks.Add(Value2);
                }
                break;
        }
        r1.Close();
    }
    return(Blocks.ToArray());
}
/// <summary>
/// Reads every directory entry stored in the given block (cluster) and
/// returns them in on-disk order. Reading stops at the first slot whose
/// leading dword is 0x0 or 0xFFFFFFFF (no further entries).
/// </summary>
/// <param name="Block">Block (cluster) number to read entries from.</param>
/// <returns>All entries found in the block, possibly empty.</returns>
public EntryData[] EntryDataFromBlock(uint Block)
{
    bool Break = false;
    List<EntryData> eList = new List<EntryData>();
    // Get our binary reader
    Streams.Reader r1 = Parent.Drive.Reader();
    r1.BaseStream.Position = VariousFunctions.GetBlockOffset(Block, Parent);
    /* Each directory entry is 0x40 bytes, so a cluster holds
     * ClusterSize / 0x40 entries. The cluster is consumed here in
     * 0x1000-byte chunks (0x1000 / 0x40 = 0x40 entries per chunk),
     * looping ClusterSize / 0x1000 times to cover the whole cluster. */
    for (int j = 0; j < Parent.PartitionInfo.ClusterSize / 0x1000; j++)
    {
        // Remember the drive offset of this chunk so entry offsets read from
        // the in-memory copy can be translated back to absolute positions.
        long r1Position = r1.BaseStream.Position;
        Streams.Reader r = new CLKsFATXLib.Streams.Reader(new System.IO.MemoryStream(r1.ReadBytes(0x1000)));
        for (int k = 0; k < (0x1000 / 0x40); k++)
        {
            // Check to see if we've passed the last entry...
            uint val = r.ReadUInt32();
            if (val == 0x0 || val == 0xFFFFFFFF)
            {
                Break = true;
                break;
            }
            // Go back four bytes because we just checked the next four...
            r.BaseStream.Position -= 4;
            long StartOffset = r.BaseStream.Position;
            EntryData e = new EntryData();
            // Absolute drive offset of this entry = chunk offset + in-chunk offset.
            e.EntryOffset = r.BaseStream.Position + r1Position;
            e.NameSize = r.ReadByte();
            e.Flags = r.ReadByte();
            /* The deleted marker (0xE5) is stored in the name-size field, so
             * deleted entries have no usable length — read the full 0x2A-byte
             * name field instead. */
            if (e.NameSize == 0xE5)
            {
                e.Name = Encoding.ASCII.GetString(r.ReadBytes(0x2A));
            }
            else
            {
                e.Name = Encoding.ASCII.GetString(r.ReadBytes(e.NameSize));
            }
            // Fixed field layout: data starts 0x2C into the entry.
            r.BaseStream.Position = StartOffset + 0x2C;
            e.StartingCluster = r.ReadUInt32();
            e.Size = r.ReadUInt32();
            e.CreationDate = r.ReadUInt16();
            e.CreationTime = r.ReadUInt16();
            e.AccessDate = r.ReadUInt16();
            e.AccessTime = r.ReadUInt16();
            e.ModifiedDate = r.ReadUInt16();
            e.ModifiedTime = r.ReadUInt16();
            eList.Add(e);
        }
        r.Close();
        if (Break)
        {
            break;
        }
    }
    return(eList.ToArray());
}
//uint dicks(ref uint r3, ref uint r4, ref uint r6, ref uint r7, ref uint r8, ref uint r9, ref uint r10)
//{
//}
/// <summary>
/// Computes the partition's free space in bytes by scanning the FAT in
/// 0x200-byte sectors and adding one cluster's worth of bytes for every
/// all-zero (free) FAT entry found.
/// </summary>
/// <returns>Approximate free space in bytes (per the comment below, this
/// can be off by a few megabytes at the FAT boundary).</returns>
public ulong GetFreeSpace()
{
    // Our return
    ulong Return = 0;
    ulong ClusterSize = (ulong)this.ClusterSize();
    // Get our position
    long positionya = FATOffset;
    // Get our end point
    long toBeLessThan = FATOffset + RealFATSize();
    // Get our IO
    Streams.Reader io = FATXDrive.Reader();
    // Set the position
    io.BaseStream.Position = positionya;
    // Start reading!
    for (long dick = io.BaseStream.Position; dick < toBeLessThan; dick += 0x200)
    {
        bool BreakAndShit = false;
        // Set the position
        io.BaseStream.Position = dick;
        // Read our buffer
        byte[] Buffer = null;
        // NOTE(review): this branch and the Length adjustment below both try
        // to handle the final partial sector of the FAT; their interaction is
        // preserved as-is — verify against a FAT whose size is not a
        // multiple of 0x200 before restructuring.
        if ((dick - FATOffset).DownToNearest200() == (toBeLessThan - FATOffset).DownToNearest200())
        {
            // Last (partial) sector: trim the read to the bytes inside the FAT.
            byte[] Temp = io.ReadBytes(0x200);
            Buffer = new byte[(toBeLessThan - FATOffset) - (dick - FATOffset).DownToNearest200()];
            Array.Copy(Temp, 0, Buffer, 0, Buffer.Length);
        }
        else
        {
            Buffer = io.ReadBytes(0x200);
        }
        // Length to loop for (used for the end so we can read ONLY usable partitions)
        long Length = Buffer.Length;
        if (dick == VariousFunctions.DownToNearest200(toBeLessThan))
        {
            Length = toBeLessThan - VariousFunctions.DownToNearest200(toBeLessThan);
            BreakAndShit = true;
        }
        // Check the values
        Streams.Reader ioya = new Streams.Reader(new System.IO.MemoryStream(Buffer));
        for (int i = 0; i < Length; i += EntrySize)
        {
            // This size will be off by a few megabytes, no big deal in my opinion
            if (EntrySize == 2)
            {
                ushort Value = ioya.ReadUInt16();
                if (Value == 0)
                {
                    Return += ClusterSize;
                }
            }
            else
            {
                if (ioya.ReadUInt32() == 0)
                {
                    Return += ClusterSize;
                }
            }
        }
        ioya.Close();
        if (BreakAndShit)
        {
            break;
        }
    }
    return(Return);
}
/// <summary>
/// TOTAL size (includes padding) of the File Allocation Table (in bytes)
/// FOR REAL SIZE, CALL TO RealFATSize();
/// </summary>
/// <returns>
/// The padded FAT size: a cached/probed value when fatsize was preset,
/// fixed sizes for the System Extended (0x5000) and System Cache (0x7000)
/// partitions, otherwise a value computed from partition geometry.
/// </returns>
public long FATSize()
{
    // A preset fatsize is treated as a lower bound: probe past it once,
    // extending by 0x1000 for every additional all-zero page found.
    if (fatsize != 0)
    {
        if (!SizeChecked)
        {
            Streams.Reader r = FATXDrive.Reader();
            r.BaseStream.Position = Partition.Offset + fatsize + 0x1000;
            while (true)
            {
                if (r.ReadUInt32() == 0x0)
                {
                    fatsize += 0x1000;
                    // Skip to the next 0x1000 page (minus the 4 bytes just read).
                    r.BaseStream.Position += 0x1000 - 0x4;
                }
                else
                {
                    break;
                }
            }
            SizeChecked = true;
        }
        return(fatsize);
    }
    // Well-known system partitions have fixed FAT sizes.
    if (Partition.Offset == (long)Geometry.HDDOffsets.System_Extended)
    {
        return(0x5000);
    }
    else if (Partition.Offset == (long)Geometry.HDDOffsets.System_Cache)
    {
        return(0x7000);
    }
    #region old
    //long size = 0;
    //if (Partition.Offset == 0x20000000 && FATXDrive.IsUSB)
    //{
    //    System.IO.FileInfo fi = new System.IO.FileInfo(FATXDrive.DumpPath + "\\Data0001");
    //    size = fi.Length - 0x1000;
    //    //Return the size.
    //    return size;
    //}
    //else
    //{
    //    //This gets the size
    //    size = ((PartitionSize() / ClusterSize()) * EntrySize);
    //    //We need to round up to the nearest 0x1000 byte boundary.
    //    long sizeToAdd = (0x1000 - (size % 0x1000));
    //    if (!FATXDrive.IsUSB)
    //    {
    //        size += sizeToAdd;
    //    }
    //    //Return the size.
    //    return size;
    //}
    #endregion
    //Code that rounds up to nearest cluster...
    long size = 0;
    #region shit
    //if (Partition.Offset == 0x20000000 && FATXDrive.IsUSB)
    //{
    //    System.IO.FileInfo fi = new System.IO.FileInfo(FATXDrive.DumpPath + "\\Data0001");
    //    size = fi.Length - 0x1000;
    //    //Ghetto
    //    Streams.Reader ir = FATXDrive.Reader();
    //    //Return the size.
    //    return size;
    //}
    //else
    //{
    #endregion
    //This gets the size: one EntrySize-byte FAT entry per cluster.
    size = (((PartitionSize() / ClusterSize())) * EntrySize);
    // Round up to the boundary used on disk. NOTE(review): the +0x1000/-0x1000
    // adjustment around UpToNearestCluster is preserved as-is — its exact
    // rationale is not evident from this file; verify before simplifying.
    size = VariousFunctions.UpToNearestCluster(size + 0x1000, ClusterSize() / EntrySize) - 0x1000;
    //Return the size.
    return(size);
    //uint r24 = 0, r3, r6, r23, r10, r11, r27, r22, r25, r26, r28, r29, r30, r31;
    //if (Partition.Size == 0)
    //{
    //}
    //else
    //{
    //    r11 = r27 & 0xFF;
    //    if (r11 == 2)
    //    {
    //        r11 = r22;
    //        r10 = r11 + r29;
    //        r11--;
    //        r10--;
    //        r29 = r10 & (~r11);
    //        r30 = r29;
    //    }
    //    else
    //    {
    //        // break here
    //    }
    //    r10 = 1;
    //    r3 = dicks(ref r28, ref r26, ref r27, ref r25, ref r31, ref r24, ref r10);
    //}
}
// Do not feel like recoding this function.
/// <summary>
/// Serializes an EntryData record into its 0x40-byte on-disk directory
/// entry slot by read-modify-writing the surrounding 0x200-byte sector.
/// </summary>
/// <param name="Edata">Entry to write; EntryOffset selects the slot.</param>
public void CreateNewEntry(EntryData Edata)
{
    Streams.Reader br = Parent.Drive.Reader();
    //Set our position so that we can read the entry location
    br.BaseStream.Position = VariousFunctions.DownToNearest200(Edata.EntryOffset);
    byte[] buffer = br.ReadBytes(0x200);
    //Create our binary writer (the MemoryStream writes into buffer in place)
    Streams.Writer bw = new Streams.Writer(new System.IO.MemoryStream(buffer));
    //Set our position to where the entry is within the sector
    long EntryOffset = Edata.EntryOffset - VariousFunctions.DownToNearest200(Edata.EntryOffset);
    bw.BaseStream.Position = EntryOffset;
    //Write our entry
    bw.Write(Edata.NameSize);
    bw.Write(Edata.Flags);
    bw.Write(Encoding.ASCII.GetBytes(Edata.Name));
    if (Edata.NameSize != 0xE5)
    {
        // Pad the remainder of the 0x2A-byte name field with 0xFF.
        int FFLength = 0x2A - Edata.NameSize;
        byte[] FF = new byte[FFLength];
        for (int i = 0; i < FFLength; i++)
        {
            FF[i] = 0xFF;
        }
        bw.Write(FF);
    }
    else
    {
        // Deleted entry (name size 0xE5): skip the rest of the name field,
        // leaving the existing bytes untouched.
        bw.BaseStream.Position += 0x2A - Edata.Name.Length;
    }
    // The numeric fields are converted to byte arrays and reversed by hand
    // because the writer otherwise emits them in little-endian order.
    byte[] StartingCluster = BitConverter.GetBytes(Edata.StartingCluster);
    Array.Reverse(StartingCluster);
    bw.Write(StartingCluster);
    byte[] Size = BitConverter.GetBytes(Edata.Size);
    Array.Reverse(Size);
    bw.Write(Size);
    //Write the creation/access/modified date-time pairs, big-endian
    byte[] CreationDate = BitConverter.GetBytes(Edata.CreationDate);
    byte[] CreationTime = BitConverter.GetBytes(Edata.CreationTime);
    byte[] AccessDate = BitConverter.GetBytes(Edata.AccessDate);
    byte[] AccessTime = BitConverter.GetBytes(Edata.AccessTime);
    byte[] ModifiedDate = BitConverter.GetBytes(Edata.ModifiedDate);
    byte[] ModifiedTime = BitConverter.GetBytes(Edata.ModifiedTime);
    Array.Reverse(CreationDate);
    Array.Reverse(CreationTime);
    Array.Reverse(AccessDate);
    Array.Reverse(AccessTime);
    Array.Reverse(ModifiedDate);
    Array.Reverse(ModifiedTime);
    bw.Write(CreationDate);
    bw.Write(CreationTime);
    bw.Write(AccessDate);
    bw.Write(AccessTime);
    bw.Write(ModifiedDate);
    bw.Write(ModifiedTime);
    //Close our writer
    bw.Close();
    //Get our IO and flush the patched sector back to the drive
    bw = Parent.Drive.Writer();
    bw.BaseStream.Position = VariousFunctions.DownToNearest200(Edata.EntryOffset);
    //Write out our buffer
    bw.Write(buffer);
}