/// <summary>
/// Command: extract a single file from a gzip archive.
/// Expects exactly three arguments: archive path, destination folder, destination file name.
/// </summary>
public override ICommandResult Execute(IDictionary<string, string> vars, params string[] args)
{
    if (args.Length != 3)
    {
        return ArgsCountError(3, args);
    }

    SubstituteVariables(vars, args);

    var gzipPath = args[0];
    var targetFolder = args[1];
    var targetFileName = args[2];

    // Validate every argument before touching the file system.
    if (string.IsNullOrWhiteSpace(gzipPath))
    {
        return Error("the gzip archive path was empty or white space");
    }
    if (string.IsNullOrWhiteSpace(targetFolder))
    {
        return Error("the destination folder path was empty or white space");
    }
    if (string.IsNullOrWhiteSpace(targetFileName))
    {
        return Error("the destination filename was empty or white space");
    }

    ArchiveUtils.ExtractGZip(gzipPath, targetFolder, targetFileName);
    return Success();
}
/// <summary>
/// Runs the patch command script inside the working folder, packs the output
/// folder into the firmware archive and returns the archive's full path.
/// </summary>
public string PerformPatch()
{
    SafeDirectory.SetCurrentDirectory(MiscUtils.WORKING_FOLDER);
    UpdateProgress(0);

    // Execute every patch command in script order.
    foreach (var command in GetCommands(_commandsText))
    {
        ExecuteCommand(command);
    }

    var fullOutputFileName = Path.Combine(SafeDirectory.GetCurrentDirectory(), MiscUtils.OUTPUT_FIRMWARE_NAME);

    // Pack everything the commands produced in the output folder.
    SafeDirectory.SetCurrentDirectory(MiscUtils.OUTPUT_FOLDER_NAME);
    ArchiveUtils.CreateSample(fullOutputFileName, null, MiscUtils.GetAllFileInFodler(SafeDirectory.GetCurrentDirectory()));

    UpdateProgress(300);
    return fullOutputFileName;
}
/// <summary>
/// Writes a number as a fixed-width ASCII field in the given radix,
/// left-padded with '0'. When the textual form is longer than
/// <paramref name="length"/>, only the least significant digits are kept.
/// </summary>
/// <param name="number">value to write</param>
/// <param name="length">exact number of ASCII characters to emit</param>
/// <param name="radix">16 for hex, 8 for octal, anything else for decimal</param>
private void writeAsciiLong(long number, int length, int radix) //throws IOException
{
    StringBuilder tmp = new StringBuilder();
    String tmpStr;
    if (radix == 16)
    {
        tmp.Append(java.lang.Long.toHexString(number));
    }
    else if (radix == 8)
    {
        tmp.Append(java.lang.Long.toOctalString(number));
    }
    else
    {
        tmp.Append("" + number);
    }
    if (tmp.Length <= length)
    {
        // pad on the left with zeroes up to the requested width
        long insertLength = length - tmp.Length;
        for (int pos = 0; pos < insertLength; pos++)
        {
            tmp.Insert(0, "0");
        }
        // FIX: was Java-style `tmp.toString()`; use the C# StringBuilder
        // method, consistent with the truncation branch below.
        tmpStr = tmp.ToString();
    }
    else
    {
        // value does not fit: keep only the low-order digits
        tmpStr = tmp.ToString().Substring(tmp.Length - length);
    }
    byte[] b = ArchiveUtils.toAsciiBytes(tmpStr);
    outJ.write(b);
    count(b.Length);
}
/// <summary>
/// Command: extract a zip archive into a destination folder.
/// Expects exactly two arguments: zip path and destination folder.
/// </summary>
public override ICommandResult Execute(IDictionary<string, string> vars, params string[] args)
{
    if (args.Length != 2)
    {
        return ArgsCountError(2, args);
    }

    SubstituteVariables(vars, args);

    var archivePath = args[0];
    var destinationFolder = args[1];

    if (string.IsNullOrWhiteSpace(archivePath))
    {
        return Error("the zip archive path was empty or white space");
    }
    if (string.IsNullOrWhiteSpace(destinationFolder))
    {
        return Error("the destination unzip folder path was empty or white space");
    }

    ArchiveUtils.ExtractZipFile(archivePath, null, destinationFolder);
    return Success();
}
/// <summary>
/// Writes the header record for the given entry, dispatching on its CPIO
/// format. The ASCII formats first emit their 6-byte magic string; the old
/// binary format writes its magic as a swapped binary half-word instead.
/// </summary>
private void writeHeader(CpioArchiveEntry e) //throws IOException
{
    var format = e.getFormat();
    if (format == CpioConstants.FORMAT_NEW)
    {
        outJ.write(ArchiveUtils.toAsciiBytes(CpioConstants.MAGIC_NEW));
        count(6);
        writeNewEntry(e);
    }
    else if (format == CpioConstants.FORMAT_NEW_CRC)
    {
        outJ.write(ArchiveUtils.toAsciiBytes(CpioConstants.MAGIC_NEW_CRC));
        count(6);
        writeNewEntry(e);
    }
    else if (format == CpioConstants.FORMAT_OLD_ASCII)
    {
        outJ.write(ArchiveUtils.toAsciiBytes(CpioConstants.MAGIC_OLD_ASCII));
        count(6);
        writeOldAsciiEntry(e);
    }
    else if (format == CpioConstants.FORMAT_OLD_BINARY)
    {
        // binary magic, written swapped rather than as ASCII
        const bool swapHalfWord = true;
        writeBinaryLong(CpioConstants.MAGIC_OLD_BINARY, 2, swapHalfWord);
        writeOldBinaryEntry(e, swapHalfWord);
    }
    // any other format value is silently ignored, matching the original
    // switch which had no default case
}
/// <summary>
/// Reads exactly <paramref name="length"/> ASCII bytes from the stream and
/// parses them as a long in the given radix.
/// </summary>
private long readAsciiLong(int length, int radix) // throws IOException
{
    byte[] raw = new byte[length];
    readFully(raw, 0, raw.Length);
    string text = ArchiveUtils.toAsciiString(raw);
    return java.lang.Long.parseLong(text, radix);
}
/// <summary>
/// Reassembles a blob from its ordered chunk entities, decompresses the
/// merged gzip body and maps the result onto an output DTO.
/// Returns null when the entity list is empty.
/// </summary>
public BlobOutputDto MergeChunks(IList<BlobEntity> entities)
{
    if (entities.Count == 0)
    {
        return null;
    }

    var first = entities[0];
    var dto = new BlobOutputDto();
    dto.Id = first.BlobId;
    dto.TimeStamp = Mapper.Map<DateTime, long>(first.TimeStamp);
    dto.Remark = first.Remark;

    // Concatenate the chunk bodies (fast path when there is a single chunk).
    byte[] bodyBytes;
    if (entities.Count == 1)
    {
        bodyBytes = first.Body;
    }
    else
    {
        bodyBytes = new byte[entities.Sum(e => e.Body.Length)];
        var position = 0;
        foreach (var entity in entities)
        {
            Array.Copy(entity.Body, 0, bodyBytes, position, entity.Body.Length);
            position += entity.Body.Length;
        }
    }

    // Stored bodies are gzip-compressed; inflate before mapping to a string.
    bodyBytes = ArchiveUtils.DecompressFromGZip(bodyBytes);
    dto.Body = Mapper.Map<byte[], string>(bodyBytes);
    return dto;
}
/**
 * Writes an ASCII string to the stream followed by \0
 * and advances the byte counter by the bytes written.
 * @param str the String to write
 * @throws IOException if the string couldn't be written
 */
private void writeCString(String str) //throws IOException
{
    byte[] ascii = ArchiveUtils.toAsciiBytes(str);
    outJ.write(ascii);
    outJ.write('\0');
    count(ascii.Length + 1);
}
/**
 * Reads the next CPIO file entry and positions stream at the beginning of
 * the entry data.
 *
 * Format detection: the 2-byte old-binary magic is tried in both byte
 * orders first; otherwise 4 more bytes are read and the resulting 6-byte
 * ASCII magic selects the new, new-CRC or old-ASCII format.
 *
 * @return the CPIOArchiveEntry just read, or null when the trailer
 *         (end-of-archive marker) entry is reached
 * @throws IOException
 *         if an I/O error has occurred or if a CPIO file error has
 *         occurred
 */
public CpioArchiveEntry getNextCPIOEntry() //throws IOException
{
    ensureOpen();
    // skip any remaining data of the previous entry
    if (this.entry != null)
    {
        closeEntry();
    }
    byte[] magic = new byte[2];
    readFully(magic, 0, magic.Length);
    // old binary format: 2-byte magic, possibly byte-swapped
    if (CpioUtil.byteArray2long(magic, false) == CpioConstants.MAGIC_OLD_BINARY)
    {
        this.entry = readOldBinaryEntry(false);
    }
    else if (CpioUtil.byteArray2long(magic, true) == CpioConstants.MAGIC_OLD_BINARY)
    {
        this.entry = readOldBinaryEntry(true);
    }
    else
    {
        // ASCII formats use a 6-byte magic; read the remaining 4 bytes
        byte[] more_magic = new byte[4];
        readFully(more_magic, 0, more_magic.Length);
        byte[] tmp = new byte[6];
        java.lang.SystemJ.arraycopy(magic, 0, tmp, 0, magic.Length);
        java.lang.SystemJ.arraycopy(more_magic, 0, tmp, magic.Length, more_magic.Length);
        String magicString = ArchiveUtils.toAsciiString(tmp);
        if (magicString.equals(CpioConstants.MAGIC_NEW))
        {
            this.entry = readNewEntry(false);
        }
        else if (magicString.equals(CpioConstants.MAGIC_NEW_CRC))
        {
            // new format variant with per-entry CRC
            this.entry = readNewEntry(true);
        }
        else if (magicString.equals(CpioConstants.MAGIC_OLD_ASCII))
        {
            this.entry = readOldAsciiEntry();
        }
        else
        {
            throw new java.io.IOException("Unknown magic [" + magicString + "]. Occured at byte: " + getBytesRead());
        }
    }

    // reset per-entry read state
    this.entryBytesRead = 0;
    this.entryEOF = false;
    this.crc = 0;

    // the trailer entry marks the end of the archive
    if (this.entry.getName().equals(CpioConstants.CPIO_TRAILER))
    {
        this.entryEOF = true;
        return (null);
    }
    return (this.entry);
}
/**
 * Put an entry on the output stream. This writes the entry's
 * header record and positions the output stream for writing
 * the contents of the entry. Once this method is called, the
 * stream is ready for calls to write() to write the entry's
 * contents. Once the contents are written, closeArchiveEntry()
 * <B>MUST</B> be called to ensure that all buffered data
 * is completely written to the output stream.
 *
 * @param archiveEntry The TarEntry to be written to the archive.
 * @throws IOException on error
 * @throws ClassCastException if archiveEntry is not an instance of TarArchiveEntry
 */
public override void putArchiveEntry(ArchiveEntry archiveEntry) //throws IOException
{
    if (finished)
    {
        throw new java.io.IOException("Stream has already been finished");
    }
    TarArchiveEntry entry = (TarArchiveEntry)archiveEntry;
    // names too long for the standard tar header field need special handling
    if (entry.getName().length() >= TarConstants.NAMELEN)
    {
        if (longFileMode == LONGFILE_GNU)
        {
            // create a TarEntry for the LongLink, the contents
            // of which are the entry's name (recursive call writes
            // the LongLink header, then the name bytes follow as data)
            TarArchiveEntry longLinkEntry = new TarArchiveEntry(TarConstants.GNU_LONGLINK, TarConstants.LF_GNUTYPE_LONGNAME);
            byte[] nameBytes = ArchiveUtils.toAsciiBytes(entry.getName());
            longLinkEntry.setSize(nameBytes.Length + 1); // +1 for NUL
            putArchiveEntry(longLinkEntry);
            write(nameBytes);
            write(0); // NUL terminator
            closeArchiveEntry();
        }
        else if (longFileMode != LONGFILE_TRUNCATE)
        {
            // any mode other than GNU or TRUNCATE: refuse the entry outright
            throw new java.lang.RuntimeException("file name '" + entry.getName() + "' is too long ( > " + TarConstants.NAMELEN + " bytes)");
        }
        // in TRUNCATE mode the over-long name falls through; presumably it is
        // cut to NAMELEN when the header is written below — TODO confirm
    }
    // write the header record and set up per-entry bookkeeping
    entry.writeEntryHeader(recordBuf);
    buffer.writeRecord(recordBuf);
    currBytes = 0;
    // directories carry no data, so nothing further may be written for them
    if (entry.isDirectory())
    {
        currSize = 0;
    }
    else
    {
        currSize = entry.getSize();
    }
    currName = entry.getName();
    haveUnclosedEntry = true;
}
/// <summary>
/// Stores a blob and returns its id. If a blob with the same SHA-256
/// content hash already exists, that blob's id is returned instead of
/// storing a duplicate.
/// </summary>
/// <param name="dto">input carrying the (possibly gzip-compressed) body,
/// a remark and a timestamp</param>
/// <returns>the id of the existing or newly created blob</returns>
public async Task<Guid> Put(BlobInputDto dto)
{
    // Map the submitted body to bytes and compute its checksum
    var originalBytes = Mapper.Map<string, byte[]>(dto.Body);
    if (dto.IsCompressed)
    {
        originalBytes = ArchiveUtils.DecompressFromGZip(originalBytes);
    }
    var hash = HashUtils.GetSHA256Hash(originalBytes);
    using (var transaction = await _repository.BeginTransactionAsync())
    {
        // If a blob with the same checksum already exists, return its id
        var existBlobId = await _repository.QueryNoTrackingAsync(q => q.Where(x => x.BodyHash == hash).Select(x => x.BlobId).FirstOrDefaultAsyncTestable());
        if (existBlobId != Guid.Empty)
        {
            return (existBlobId);
        }
    }
    // Split the content into chunks
    var blobId = PrimaryKeyUtils.Generate<Guid>();
    var chunks = SplitChunks(blobId, originalBytes, hash, dto.Remark, dto.TimeStamp);
    // Add the new blob
    // NOTE: concurrent calls may still insert blobs with identical content
    using (var transaction = await _repository.BeginTransactionAsync())
    {
        foreach (var chunk in chunks)
        {
            // Chunks must be committed one by one, otherwise MySQL's
            // max_allowed_packet limit would be exceeded
            await _repository.AddAsync(chunk);
            await _repository.SaveChangesAsync();
        }
        transaction.Commit();
    }
    return (blobId);
}
/**
 * Get an extended name from the GNU extended name buffer.
 *
 * Names in the "//" record are separated by LF characters; a trailing "/"
 * before the LF marks the end of the name and is dropped.
 *
 * @param offset pointer to entry within the buffer
 * @return the extended file name; without trailing "/" if present.
 * @throws IOException if name not found or buffer not set up
 */
private String getExtendedName(int offset) //throws IOException
{
    if (namebuffer == null)
    {
        throw new java.io.IOException("Cannot process GNU long filename as no // record was found");
    }
    for (int i = offset; i < namebuffer.Length; i++)
    {
        // BUGFIX: entries are terminated by LF ('\u000A', octal 12 = decimal 10).
        // The original compared against '\u0010' (decimal 16), which never
        // matches a linefeed, so lookups always failed.
        if (namebuffer[i] == '\u000A')
        {
            // guard i > offset so we never peek before this entry's start
            if (i > offset && namebuffer[i - 1] == '/')
            {
                i--; // drop trailing /
            }
            return ArchiveUtils.toAsciiString(namebuffer, offset, i - offset);
        }
    }
    throw new java.io.IOException("Failed to read entry: " + offset);
}
/**
 * Checks if the signature matches what is expected for a tar file,
 * accepting POSIX (ustar), GNU and Ant-generated magic/version pairs.
 *
 * @param signature
 *            the bytes to check
 * @param length
 *            the number of bytes to check
 * @return true, if this stream is a tar archive stream, false otherwise
 */
public static bool matches(byte[] signature, int length)
{
    // need at least the magic and version fields to decide
    if (length < TarConstants.VERSION_OFFSET + TarConstants.VERSIONLEN)
    {
        return false;
    }

    bool isPosix =
        ArchiveUtils.matchAsciiBuffer(TarConstants.MAGIC_POSIX, signature, TarConstants.MAGIC_OFFSET, TarConstants.MAGICLEN) &&
        ArchiveUtils.matchAsciiBuffer(TarConstants.VERSION_POSIX, signature, TarConstants.VERSION_OFFSET, TarConstants.VERSIONLEN);
    if (isPosix)
    {
        return true;
    }

    bool isGnu =
        ArchiveUtils.matchAsciiBuffer(TarConstants.MAGIC_GNU, signature, TarConstants.MAGIC_OFFSET, TarConstants.MAGICLEN) &&
        (ArchiveUtils.matchAsciiBuffer(TarConstants.VERSION_GNU_SPACE, signature, TarConstants.VERSION_OFFSET, TarConstants.VERSIONLEN) ||
         ArchiveUtils.matchAsciiBuffer(TarConstants.VERSION_GNU_ZERO, signature, TarConstants.VERSION_OFFSET, TarConstants.VERSIONLEN));
    if (isGnu)
    {
        return true;
    }

    // COMPRESS-107 - recognise Ant tar files
    return ArchiveUtils.matchAsciiBuffer(TarConstants.MAGIC_ANT, signature, TarConstants.MAGIC_OFFSET, TarConstants.MAGICLEN) &&
           ArchiveUtils.matchAsciiBuffer(TarConstants.VERSION_ANT, signature, TarConstants.VERSION_OFFSET, TarConstants.VERSIONLEN);
}
/// <summary>
/// Runs the patch command script, logging and rethrowing any command failure,
/// then packs the output folder into the firmware archive and returns its path.
/// </summary>
public string PerformPatch()
{
    SafeDirectory.SetCurrentDirectory(MiscUtils.WORKING_FOLDER);
    UpdateProgress(0);

    foreach (var command in GetCommands(_commandsText).ToList())
    {
        try
        {
            //TODO: this is strange but it fixes not being able to find the file for 5.3
            var commandToExecute = command.Contains(@"$firmware.ipsw")
                ? command.Replace("$", string.Empty)
                : command;
            ExecuteCommand(commandToExecute);
        }
        catch (Exception e)
        {
            // surface the failing command's error before aborting the patch
            Console.WriteLine(e);
            throw;
        }
    }

    var fullOutputFileName = Path.Combine(SafeDirectory.GetCurrentDirectory(), MiscUtils.OUTPUT_FIRMWARE_NAME);

    // pack everything produced in the output folder into the firmware archive
    SafeDirectory.SetCurrentDirectory(MiscUtils.OUTPUT_FOLDER_NAME);
    ArchiveUtils.CreateSample(fullOutputFileName, null, MiscUtils.GetAllFileInFodler(SafeDirectory.GetCurrentDirectory()));

    UpdateProgress(300);
    return fullOutputFileName;
}
/// <summary>
/// Prepares the working folder with the patching binaries. DEBUG builds copy
/// them from the folder configured in app.config; release builds extract the
/// embedded Binaries.zip resource.
/// </summary>
private void InitBinaries()
{
#if DEBUG
    // for debug purposes use local "Binaries" folder
    string binariesPath = ConfigurationManager.AppSettings["binariesPath"];
    if (string.IsNullOrEmpty(binariesPath))
    {
        throw new ArgumentException("Use binariesPath variable in app.config for debug!!!");
    }
    MiscUtils.RecreateDirectory(MiscUtils.WORKING_FOLDER);
    CopyFolder(binariesPath, MiscUtils.WORKING_FOLDER);
#else
    // for production use embedded Binaries.zip
    using (Stream io = this.GetType().Assembly.GetManifestResourceStream(BINARIES_RESOURCE_NAME))
    {
        // BUGFIX: GetManifestResourceStream returns null when the resource is
        // missing; fail with a clear message instead of a NullReferenceException
        // deep inside the archive extraction.
        if (io == null)
        {
            throw new InvalidOperationException("Embedded resource not found: " + BINARIES_RESOURCE_NAME);
        }
        MiscUtils.RecreateDirectory(MiscUtils.WORKING_FOLDER);
        ArchiveUtils.GetViaZipInput(io, MiscUtils.WORKING_FOLDER);
    }
#endif
}
/// <summary>
/// Entry point. With no arguments, shows the context-menu registration
/// dialog; with an argument, treats it as an archive path and mounts it,
/// prompting for a password first if the archive is encrypted.
/// </summary>
static void Main(string[] args)
{
    Application.EnableVisualStyles();
    Application.SetCompatibleTextRenderingDefault(false);

    if (args.Length == 0)
    {
        // no archive given: show the shell context-menu registration UI
        new RegisteryDialog().ShowDialog();
        return;
    }

    if (ArchiveUtils.IsEncrypted(args[0]))
    {
        // encrypted archive: ask for the password before mounting
        new PasswordDialog(args[0]).ShowDialog();
        return;
    }

    /*
     * [lyne]
     * .NET Framework's ProcessStartInfo.Arguments is a single string that is
     * split on spaces; quotes are NOT added automatically around arguments
     * containing spaces, so the path is quoted manually here.
     * (.NET 5's ProcessStartInfo.ArgumentList handles this better.)
     */
    string pathArg = $"\"path={args[0]}\"";
    ProcessStartInfo startInfo = new ProcessStartInfo
    {
        UseShellExecute = true,
        FileName = Program.WinAVFSCLIPath,
        Arguments = pathArg,
    };
    Process.Start(startInfo);
    // [lyne] Console.WriteLine produces no visible output in a WinForms app,
    // so no "no password" message is printed here.
}
/// <summary>
/// Compresses the body with gzip and lazily splits it into BlobEntity chunks
/// of at most BlobEntity.BlobChunkSize bytes. The do-while guarantees at
/// least one chunk is yielded even for an empty body.
/// </summary>
/// <param name="blobId">id shared by all chunks of this blob</param>
/// <param name="originalBytes">uncompressed blob content</param>
/// <param name="hash">content hash stored on every chunk</param>
/// <param name="remark">remark stored on every chunk</param>
/// <param name="timeStamp">source timestamp; 0 means "use the current time"</param>
public IEnumerable<BlobEntity> SplitChunks(
    Guid blobId, byte[] originalBytes, string hash, string remark, long timeStamp)
{
    // Compress the content before splitting
    var bodyBytes = ArchiveUtils.CompressToGZip(originalBytes);
    var chunkIndex = 0;
    var bodyBytesStart = 0;
    // Split the content into chunks
    do
    {
        var entityBodySize = Math.Min(bodyBytes.Length - bodyBytesStart, BlobEntity.BlobChunkSize);
        var entity = new BlobEntity();
        entity.Id = PrimaryKeyUtils.Generate<Guid>();
        entity.BlobId = blobId;
        entity.ChunkIndex = chunkIndex++;
        entity.Remark = remark;
        if (bodyBytesStart == 0 && entityBodySize == bodyBytes.Length)
        {
            // no splitting needed: the single chunk reuses the whole buffer
            entity.Body = bodyBytes;
        }
        else
        {
            entity.Body = new byte[entityBodySize];
            Array.Copy(bodyBytes, bodyBytesStart, entity.Body, 0, entityBodySize);
        }
        bodyBytesStart += entityBodySize;
        // a zero timestamp means "use the current time"
        entity.TimeStamp = timeStamp == 0 ? DateTime.UtcNow : Mapper.Map<long, DateTime>(timeStamp);
        entity.BodyHash = hash;
        entity.CreateTime = DateTime.UtcNow;
        yield return (entity);
    } while (bodyBytesStart < bodyBytes.Length);
}
/// <summary>
/// Writes the global ar archive header magic (ArArchiveEntry.HEADER) to the
/// output stream and returns the number of bytes written.
/// </summary>
private long writeArchiveHeader() //throws IOException
{
    byte[] magic = ArchiveUtils.toAsciiBytes(ArArchiveEntry.HEADER);
    outJ.write(magic);
    return magic.Length;
}
/**
 * Returns the next AR entry in this stream.
 *
 * Skips any unread data of the previous entry, validates the global archive
 * header when at offset 0, pads to an even offset, then reads and parses the
 * next 60-byte entry header. GNU "//" extended-filename records are consumed
 * transparently by recursing to the following entry.
 *
 * @return the next AR entry, or null at end of stream.
 * @throws IOException
 *         if the entry could not be read
 */
public ArArchiveEntry getNextArEntry() // throws IOException
{
    // skip whatever is left of the previous entry's data
    if (currentEntry != null)
    {
        long entryEnd = entryOffset + currentEntry.getLength();
        while (offset < entryEnd)
        {
            int x = read();
            if (x == -1)
            {
                // hit EOF before previous entry was complete
                // TODO: throw an exception instead?
                return (null);
            }
        }
        currentEntry = null;
    }
    // at the very start of the stream, verify the global archive magic
    if (offset == 0)
    {
        byte[] expected = ArchiveUtils.toAsciiBytes(ArArchiveEntry.HEADER);
        byte[] realized = new byte[expected.Length];
        int readJ = read(realized);
        if (readJ != expected.Length)
        {
            throw new java.io.IOException("failed to read header. Occured at byte: " + getBytesRead());
        }
        for (int i = 0; i < expected.Length; i++)
        {
            if (expected[i] != realized[i])
            {
                throw new java.io.IOException("invalid header " + ArchiveUtils.toAsciiString(realized));
            }
        }
    }
    // entry headers start on even offsets; skip one padding byte if needed
    if (offset % 2 != 0)
    {
        if (read() < 0)
        {
            // hit eof
            return (null);
        }
    }
    if (input.available() == 0)
    {
        return (null);
    }
    // read the fixed-width fields of the 60-byte entry header
    byte[] name = new byte[16];
    byte[] lastmodified = new byte[12];
    byte[] userid = new byte[6];
    byte[] groupid = new byte[6];
    byte[] filemode = new byte[8];
    byte[] length = new byte[10];
    read(name);
    read(lastmodified);
    read(userid);
    read(groupid);
    read(filemode);
    read(length);
    // verify the header trailer that terminates each entry header
    {
        byte[] expected = ArchiveUtils.toAsciiBytes(ArArchiveEntry.TRAILER);
        byte[] realized = new byte[expected.Length];
        int readJ = read(realized);
        if (readJ != expected.Length)
        {
            throw new java.io.IOException("failed to read entry trailer. Occured at byte: " + getBytesRead());
        }
        for (int i = 0; i < expected.Length; i++)
        {
            if (expected[i] != realized[i])
            {
                throw new java.io.IOException("invalid entry trailer. not read the content? Occured at byte: " + getBytesRead());
            }
        }
    }
    entryOffset = offset;

    // GNU ar stores multiple extended filenames in the data section of a file
    // with the name "//", this record is referred to by future headers.
    // A header references an extended filename by storing a "/" followed by a
    // decimal offset to the start of the filename in the extended filename
    // data section. The format of this "//" file itself is simply a list of
    // the long filenames, each separated by one or more LF characters. Note
    // that the decimal offsets are number of characters, not line or string
    // number within the "//" file.
    //
    // GNU ar uses a '/' to mark the end of the filename; this allows for the
    // use of spaces without the use of an extended filename.

    // entry name is stored as ASCII string
    String temp = ArchiveUtils.toAsciiString(name).trim();
    if (temp.equals("//")) // GNU extended filenames entry
    {
        int bufflen = asInt(length); // Assume length will fit in an int
        namebuffer = new byte[bufflen];
        int readJ = read(namebuffer, 0, bufflen);
        if (readJ != bufflen)
        {
            throw new java.io.IOException("Failed to read complete // record: expected=" + bufflen + " read=" + readJ);
        }
        currentEntry = new ArArchiveEntry(temp, bufflen);
        // the "//" record is bookkeeping, not a real entry: recurse to the next one
        return (getNextArEntry());
    }
    else if (temp.EndsWith("/")) // GNU terminator
    {
        temp = temp.substring(0, temp.length() - 1);
    }
    else if (temp.matches("^/\\d+")) // GNU long filename ref.
    {
        int offsetJ = java.lang.Integer.parseInt(temp.substring(1)); // get the offset
        temp = getExtendedName(offsetJ); // convert to the long name
    }
    currentEntry = new ArArchiveEntry(temp, asLong(length), asInt(userid), asInt(groupid), asInt(filemode, 8), asLong(lastmodified));
    return (currentEntry);
}