/* Reads a memory displacement of the given bit width (8/16/32/64) from
 * the input stream into op.lval and records the width in op.offset.
 * Any other width is silently ignored. */
decode_mem_disp(ref ud u, int size, ref ud_operand op)
{
    if (size == 8) {
        op.offset = 8;
        op.lval.ubyte = inp_uint8(ref u);
    } else if (size == 16) {
        op.offset = 16;
        op.lval.uword = inp_uint16(ref u);
    } else if (size == 32) {
        op.offset = 32;
        op.lval.udword = inp_uint32(ref u);
    } else if (size == 64) {
        op.offset = 64;
        op.lval.uqword = inp_uint64(ref u);
    }
}
/* Decodes the VEX.vvvv field into an XMM/YMM register operand. The
 * field sits in bits 6:3 of the second VEX byte (0xc4 form) or the
 * single VEX payload byte (0xc5 form) and is stored inverted, so it
 * is complemented before use. */
decode_vex_vvvv(ref ud u, ref ud_operand opr, ud_operand_size size)
{
    Debug.Assert(u.vex_op != 0);
    byte payload = u.vex_op == 0xc4 ? u.vex_b2 : u.vex_b1;
    byte invertedField = (byte)((payload >> 3) & 0xf);
    decode_reg(ref u, ref opr, reg_class.REGCLASS_XMM, (byte)(0xf & ~invertedField), size);
}
/*
 * decode_modrm_reg
 *
 * Decodes the reg field of the mod/rm byte, extended to 4 bits with
 * the REX.R prefix bit, and resolves it within the given register
 * class.
 */
void decode_modrm_reg(ref ud u, ref ud_operand operand, reg_class type, ud_operand_size size)
{
    byte regNum = (byte)((BitOps.REX_R(u._rex) << 3) | BitOps.MODRM_REG(modrm(ref u)));
    decode_reg(ref u, ref operand, type, regNum, size);
}
/* Decodes a direct memory-offset operand (moffset): a bare absolute
 * displacement with no base or index register, whose width follows
 * the current address-size mode. */
decode_moffset(ref ud u, ud_operand_size size, ref ud_operand opr)
{
    opr.type = ud_type.UD_OP_MEM;
    opr.scale = 0;
    opr.@base = ud_type.UD_NONE;
    opr.index = ud_type.UD_NONE;
    opr.size = (byte)resolve_operand_size(ref u, size);
    decode_mem_disp(ref u, u.adr_mode, ref opr);
}
/* Decodes an XMM/YMM register encoded in the high nibble of an
 * immediate byte (VEX /is4 form). Outside 64-bit mode only the first
 * eight registers are reachable. Returns the current error state. */
decode_vex_immreg(ref ud u, ref ud_operand opr, ud_operand_size size)
{
    byte is4 = (byte)inp_next(ref u);
    /* registers 8-15 exist only in 64-bit mode */
    byte regLimit = (byte)(u.dis_mode == 64 ? 0xf : 0x7);
    if (u.error != 0) {
        return u.error;
    }
    Debug.Assert(u.vex_op != 0);
    decode_reg(ref u, ref opr, reg_class.REGCLASS_XMM, (byte)(regLimit & (is4 >> 4)), size);
    return 0;
}
/* Computes the absolute target of a relative branch operand: the
 * sign-extended displacement is added to the post-instruction pc and
 * the sum is truncated to the current operand-size mode. */
public ulong ud_syn_rel_target(ref ud u, ref ud_operand opr)
{
    ulong wrapMask = 0xffffffffffffffff >> (64 - u.opr_mode);
    if (opr.size == 8) {
        return (u.pc + (ulong)opr.lval.@sbyte) & wrapMask;
    }
    if (opr.size == 16) {
        return (u.pc + (ulong)opr.lval.sword) & wrapMask;
    }
    if (opr.size == 32) {
        return (u.pc + (ulong)opr.lval.sdword) & wrapMask;
    }
    Debug.Assert(false, "invalid relative offset size.");
    return 0;
}
/* Resolves a register number within a register class to a concrete
 * ud_type register id and fills in the operand. GPR numbers are
 * size-dependent and go through decode_gpr; the other classes are a
 * fixed base plus offset. A segment number above 5 is an error. */
decode_reg(ref ud u, ref ud_operand opr, reg_class type, byte num, ud_operand_size size)
{
    size = resolve_operand_size(ref u, size);

    int regId;
    switch (type) {
    case reg_class.REGCLASS_GPR:
        regId = (int)decode_gpr(ref u, size, (byte)num);
        break;
    case reg_class.REGCLASS_MMX:
        regId = (int)ud_type.UD_R_MM0 + (num & 7);
        break;
    case reg_class.REGCLASS_XMM:
        /* SZ_QQ (256-bit) selects the YMM bank instead of XMM */
        regId = num + (int)(size == ud_operand_size.SZ_QQ ? ud_type.UD_R_YMM0 : ud_type.UD_R_XMM0);
        break;
    case reg_class.REGCLASS_CR:
        regId = (int)ud_type.UD_R_CR0 + num;
        break;
    case reg_class.REGCLASS_DB:
        regId = (int)ud_type.UD_R_DR0 + num;
        break;
    case reg_class.REGCLASS_SEG:
        /*
         * Only 6 segment registers, anything else is an error.
         */
        if ((num & 7) > 5) {
            u.error = 1;
            u.errorMessage = "invalid segment register value\n";
            return;
        }
        regId = (int)ud_type.UD_R_ES + (num & 7);
        break;
    default:
        Debug.Assert(false, "invalid register type");
        return;
    }
    opr.type = ud_type.UD_OP_REG;
    opr.@base = (ud_type)regId;
    opr.size = (byte)size;
}
/* ud_syn_print_mem_disp - prints the displacement of a memory operand.
 * With no base and no index register the displacement is an absolute
 * address and is printed unsigned (8-bit widths never occur in that
 * form, per the assert). Otherwise it is printed as a signed offset
 * (64-bit widths never occur there, per the assert); a negative value
 * gets a leading '-', a positive one gets a leading '+' only when
 * `sign` > 0 (i.e. it follows a base/index term), and a zero offset
 * prints nothing at all. */
public void ud_syn_print_mem_disp(ref ud u, ref ud_operand op, int sign) { Debug.Assert(op.offset != 0); if (op.@base == ud_type.UD_NONE && op.index == ud_type.UD_NONE) { ulong v; Debug.Assert(op.scale == 0 && op.offset != 8); /* unsigned mem-offset */ switch (op.offset) { case 16: v = op.lval.uword; break; case 32: v = op.lval.udword; break; case 64: v = op.lval.uqword; break; default: Debug.Assert(false, "invalid offset"); v = 0; /* keep cc happy */ break; } ud_asmprintf(ref u, "0x{0:x}", v); } else { long v; Debug.Assert(op.offset != 64); switch (op.offset) { case 8: v = op.lval.@sbyte; break; case 16: v = op.lval.sword; break; case 32: v = op.lval.sdword; break; default: Debug.Assert(false, "invalid offset"); v = 0; /* keep cc happy */ break; } if (v < 0) { ud_asmprintf(ref u, "-0x{0:x}", -v); } else if (v > 0) { ud_asmprintf(ref u, "{0}0x{1:x}", sign > 0 ? "+" : "", v); } } }
/* Reads an immediate operand of the resolved size from the input
 * stream. The 8-bit form is stored signed (for later sign-extension);
 * wider forms are stored unsigned. Unknown sizes leave lval untouched. */
decode_imm(ref ud u, ud_operand_size size, ref ud_operand op)
{
    op.type = ud_type.UD_OP_IMM;
    op.size = (byte)resolve_operand_size(ref u, size);
    if (op.size == 8) {
        op.lval.@sbyte = (sbyte)inp_uint8(ref u);
    } else if (op.size == 16) {
        op.lval.uword = inp_uint16(ref u);
    } else if (op.size == 32) {
        op.lval.udword = inp_uint32(ref u);
    } else if (op.size == 64) {
        op.lval.uqword = inp_uint64(ref u);
    }
}
/* -----------------------------------------------------------------------------
 * decode_a() - Decodes operands of the type seg:offset (far pointer).
 * The offset is 16 or 32 bits depending on the operand-size mode; the
 * 16-bit segment selector always follows the offset in the stream.
 * -----------------------------------------------------------------------------
 */
void decode_a(ref ud u, ref ud_operand op)
{
    op.type = ud_type.UD_OP_PTR;
    if (u.opr_mode == 16) {
        /* seg16:off16 */
        op.size = 32;
        op.lval.ptr_off = inp_uint16(ref u);
    } else {
        /* seg16:off32 */
        op.size = 48;
        op.lval.ptr_off = inp_uint32(ref u);
    }
    op.lval.ptr_seg = inp_uint16(ref u);
}
/* -----------------------------------------------------------------------------
 * opr_cast() - Prints an operand size cast ("byte ", "word ", ...),
 * preceded by "far " when the instruction is a far branch. Sizes with
 * no textual cast print nothing.
 * -----------------------------------------------------------------------------
 */
public void opr_cast(ref ud u, ref ud_operand op)
{
    if (u.br_far > 0) {
        ud_asmprintf(ref u, "far ");
    }
    string castText;
    switch (op.size) {
    case 8:  castText = "byte ";  break;
    case 16: castText = "word ";  break;
    case 32: castText = "dword "; break;
    case 64: castText = "qword "; break;
    case 80: castText = "tword "; break;
    default: return;
    }
    ud_asmprintf(ref u, castText);
}
/* ud_syn_print_imm - prints an immediate operand in hex. A
 * sign-extending immediate (OP_sI) whose encoded size differs from the
 * current operand mode is first sign-extended from 8 or 32 bits to 64,
 * then masked down to opr_mode bits (so e.g. an 8-bit -1 prints as
 * 0xffff in 16-bit mode). All other immediates are printed unsigned,
 * exactly as stored. */
public void ud_syn_print_imm(ref ud u, ref ud_operand op) { ulong v; if (op._oprcode == ud_operand_code.OP_sI && op.size != u.opr_mode) { if (op.size == 8) { v = (ulong)op.lval.@sbyte; } else { Debug.Assert(op.size == 32); v = (ulong)op.lval.sdword; } if (u.opr_mode < 64) { v = v & ((1ul << u.opr_mode) - 1ul); } } else { switch (op.size) { case 8: v = op.lval.ubyte; break; case 16: v = op.lval.uword; break; case 32: v = op.lval.udword; break; case 64: v = op.lval.uqword; break; default: Debug.Assert(false, "invalid offset"); v = 0; /* keep cc happy */ break; } } ud_asmprintf(ref u, "0x{0:x}", v); }
/* =============================================================================
 * ud_opr_is_gpr
 * Returns non-zero if the given operand is of a general purpose
 * register type.
 * (NOTE: the banner previously said "ud_opr_is_sreg" - a copy-paste
 * slip; this function tests the GPR range.)
 * =============================================================================
 */
/// <summary>
/// Returns true if the given operand is of a general purpose
/// register type (base register in the AL..R15 range).
/// </summary>
/// <param name="opr">operand to inspect</param>
/// <returns>true for general-purpose register operands</returns>
public static bool ud_opr_is_gpr(ref ud_operand opr) { return opr.type == ud_type.UD_OP_REG && opr.@base >= ud_type.UD_R_AL && opr.@base <= ud_type.UD_R_R15; }
/* True when the operand is a register whose base falls in the
 * general-purpose range (AL .. R15). */
ud_opr_is_gpr(ref ud_operand opr)
{
    bool isRegister = opr.type == ud_type.UD_OP_REG;
    return (isRegister
            && opr.@base >= ud_type.UD_R_AL
            && opr.@base <= ud_type.UD_R_R15);
}
/* Wraps a raw ud_operand value in the managed Operand type; the struct
 * is copied into UdOperand by value. */
internal Operand(ud_operand operand) { UdOperand = operand; }
/* -----------------------------------------------------------------------------
 * gen_operand() - Generates assembly output for each operand.
 *
 * Dispatches on the operand type: registers print their name from
 * ud_reg_tab; memory operands print an optional size cast, segment
 * override, base, scaled index and displacement inside brackets;
 * immediates, relative jump targets, far pointers and constants each
 * have their own formatting. syn_cast > 0 requests a size cast prefix.
 * -----------------------------------------------------------------------------
 */
void gen_operand(ref ud u, ref ud_operand op, int syn_cast)
{
    switch (op.type) {
    case ud_type.UD_OP_REG:
        syn.ud_asmprintf(ref u, "{0}", ud_reg_tab[op.@base - ud_type.UD_R_AL]);
        break;
    case ud_type.UD_OP_MEM:
        if (syn_cast > 0) {
            opr_cast(ref u, ref op);
        }
        ud_asmprintf(ref u, "[");
        if (u.pfx_seg > 0) {
            syn.ud_asmprintf(ref u, "{0}:", ud_reg_tab[u.pfx_seg - (int)ud_type.UD_R_AL]);
        }
        if (op.@base > 0) {
            syn.ud_asmprintf(ref u, "{0}", ud_reg_tab[op.@base - ud_type.UD_R_AL]);
        }
        if (op.index > 0) {
            /* '+' only when an index follows a base term */
            syn.ud_asmprintf(ref u, "{0}{1}", op.@base != ud_type.UD_NONE ? "+" : "",
                             ud_reg_tab[op.index - ud_type.UD_R_AL]);
            if (op.scale > 0) {
                syn.ud_asmprintf(ref u, "*{0}", op.scale);
            }
        }
        if (op.offset != 0) {
            ud_syn_print_mem_disp(ref u, ref op,
                                  (op.@base != ud_type.UD_NONE || op.index != ud_type.UD_NONE) ? 1 : 0);
        }
        syn.ud_asmprintf(ref u, "]");
        break;
    case ud_type.UD_OP_IMM:
        ud_syn_print_imm(ref u, ref op);
        break;
    case ud_type.UD_OP_JIMM:
        ud_syn_print_addr(ref u, (long)ud_syn_rel_target(ref u, ref op));
        break;
    case ud_type.UD_OP_PTR:
        switch (op.size) {
        case 32:
            ud_asmprintf(ref u, "word 0x{0:x}:0x{1:x}", op.lval.ptr_seg,
                         op.lval.ptr_off & 0xFFFF);
            break;
        case 48:
            /* BUGFIX: the second format item must reference argument 1
             * (the offset). It previously repeated {0}, printing the
             * segment twice and ignoring ptr_off entirely. */
            ud_asmprintf(ref u, "dword 0x{0:x}:0x{1:x}", op.lval.ptr_seg,
                         op.lval.ptr_off);
            break;
        }
        break;
    case ud_type.UD_OP_CONST:
        if (syn_cast > 0) {
            opr_cast(ref u, ref op);
        }
        ud_asmprintf(ref u, "{0}", op.lval.udword);
        break;
    default:
        return;
    }
}
/* decode_modrm_rm - decodes the mod/rm byte's r/m field (extended by
 * REX.B) into either a register operand (mod == 3) or a memory operand.
 * For 64- and 32-bit addressing it handles the SIB byte (rm == 4),
 * including the special cases: index == (R/E)SP means "no index",
 * base (R/E)BP (or R13 in 64-bit) with mod == 0 means "no base" with a
 * 32-bit displacement, and in 64-bit mode mod == 0 / rm == 5 selects
 * RIP-relative addressing. 16-bit mode uses the classic BX/BP/SI/DI
 * base+index table. Finally any displacement of the computed width is
 * read from the stream; otherwise op.offset is cleared.
 *
 * NOTE(review): the 64-bit SIB path extends base/index with u._rex but
 * the 32-bit path uses u.pfx_rex - confirm both fields hold the same
 * value here, otherwise REX-extended SIB decoding differs between the
 * two address modes. */
decode_modrm_rm(ref ud u, ref ud_operand op, reg_class type, /* register type */ ud_operand_size size) /* operand size */ { int offset = 0; byte mod, rm; /* get mod, r/m and reg fields */ mod = BitOps.MODRM_MOD(modrm(ref u)); rm = (byte)((BitOps.REX_B(u._rex) << 3) | BitOps.MODRM_RM(modrm(ref u))); /* * If mod is 11b, then the modrm.rm specifies a register. * */ if (mod == 3) { decode_reg(ref u, ref op, type, rm, size); return; } /* * !11b => Memory Address */ op.type = ud_type.UD_OP_MEM; op.size = (byte)resolve_operand_size(ref u, size); if (u.adr_mode == 64) { op.@base = ud_type.UD_R_RAX + rm; if (mod == 1) { offset = 8; } else if (mod == 2) { offset = 32; } else if (mod == 0 && (rm & 7) == 5) { op.@base = ud_type.UD_R_RIP; offset = 32; } else { offset = 0; } /* * Scale-Index-Base (SIB) */ if ((rm & 7) == 4) { inp_next(ref u); op.@base = ud_type.UD_R_RAX + (BitOps.SIB_B(inp_curr(ref u)) | (BitOps.REX_B(u._rex) << 3)); op.index = ud_type.UD_R_RAX + (BitOps.SIB_I(inp_curr(ref u)) | (BitOps.REX_X(u._rex) << 3)); /* special conditions for base reference */ if (op.index == ud_type.UD_R_RSP) { op.index = ud_type.UD_NONE; op.scale = (byte)ud_type.UD_NONE; } else { op.scale = (byte)((1 << BitOps.SIB_S(inp_curr(ref u))) & ~1); } if (op.@base == ud_type.UD_R_RBP || op.@base == ud_type.UD_R_R13) { if (mod == 0) { op.@base = ud_type.UD_NONE; } if (mod == 1) { offset = 8; } else { offset = 32; } } } else { op.scale = 0; op.index = ud_type.UD_NONE; } } else if (u.adr_mode == 32) { op.@base = ud_type.UD_R_EAX + rm; if (mod == 1) { offset = 8; } else if (mod == 2) { offset = 32; } else if (mod == 0 && rm == 5) { op.@base = ud_type.UD_NONE; offset = 32; } else { offset = 0; } /* Scale-Index-Base (SIB) */ if ((rm & 7) == 4) { inp_next(ref u); op.scale = (byte)((1 << BitOps.SIB_S(inp_curr(ref u))) & ~1); op.index = ud_type.UD_R_EAX + (BitOps.SIB_I(inp_curr(ref u)) | (BitOps.REX_X(u.pfx_rex) << 3)); op.@base = ud_type.UD_R_EAX + (BitOps.SIB_B(inp_curr(ref u)) | 
(BitOps.REX_B(u.pfx_rex) << 3)); if (op.index == ud_type.UD_R_ESP) { op.index = ud_type.UD_NONE; op.scale = (byte)ud_type.UD_NONE; } /* special condition for base reference */ if (op.@base == ud_type.UD_R_EBP) { if (mod == 0) { op.@base = ud_type.UD_NONE; } if (mod == 1) { offset = 8; } else { offset = 32; } } } else { op.scale = 0; op.index = ud_type.UD_NONE; } } else { ud_type[] bases = { ud_type.UD_R_BX, ud_type.UD_R_BX, ud_type.UD_R_BP, ud_type.UD_R_BP, ud_type.UD_R_SI, ud_type.UD_R_DI, ud_type.UD_R_BP, ud_type.UD_R_BX }; ud_type[] indices = { ud_type.UD_R_SI, ud_type.UD_R_DI, ud_type.UD_R_SI, ud_type.UD_R_DI, ud_type.UD_NONE, ud_type.UD_NONE, ud_type.UD_NONE, ud_type.UD_NONE }; op.@base = bases[rm & 7]; op.index = indices[rm & 7]; op.scale = 0; if (mod == 0 && rm == 6) { offset = 16; op.@base = ud_type.UD_NONE; } else if (mod == 1) { offset = 8; } else if (mod == 2) { offset = 16; } } if (offset > 0) { decode_mem_disp(ref u, offset, ref op); } else { op.offset = 0; } }
/* decode_operand - decodes one operand according to the operand code
 * from the instruction tables: dispatches to the modrm reg/rm decoders,
 * immediate/relative readers, fixed-register helpers, far-pointer,
 * moffset, and VEX (vvvv / is4) decoders. Operand codes that constrain
 * modrm.mod (OP_F/OP_M require a memory form; OP_N/OP_U/OP_R require a
 * register form) set u.error with a message when violated, then decode
 * anyway. In 64-bit mode only FS/GS segment overrides are accepted.
 * Returns the decoded operand's type, UD_NONE for unknown codes. */
decode_operand(ref ud u, ref ud_operand operand, ud_operand_code type, ud_operand_size size) { operand.type = ud_type.UD_NONE; operand._oprcode = type; switch (type) { case ud_operand_code.OP_A: decode_a(ref u, ref operand); break; case ud_operand_code.OP_MR: decode_modrm_rm(ref u, ref operand, (byte)reg_class.REGCLASS_GPR, BitOps.MODRM_MOD(modrm(ref u)) == 3 ? size.Mx_reg_size() : size.Mx_mem_size()); break; case ud_operand_code.OP_F: u.br_far = 1; if (BitOps.MODRM_MOD(modrm(ref u)) == 3) { u.error = 1; u.errorMessage = "expected modrm.mod != 3\n"; } decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_GPR, size); break; case ud_operand_code.OP_M: if (BitOps.MODRM_MOD(modrm(ref u)) == 3) { u.error = 1; u.errorMessage = "expected modrm.mod != 3\n"; } decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_GPR, size); break; case ud_operand_code.OP_E: decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_GPR, size); break; case ud_operand_code.OP_G: decode_modrm_reg(ref u, ref operand, reg_class.REGCLASS_GPR, size); break; case ud_operand_code.OP_sI: case ud_operand_code.OP_I: decode_imm(ref u, size, ref operand); break; case ud_operand_code.OP_I1: operand.type = ud_type.UD_OP_CONST; operand.lval.udword = 1; break; case ud_operand_code.OP_N: if (BitOps.MODRM_MOD(modrm(ref u)) != 3) { u.error = 1; u.errorMessage = "expected modrm.mod == 3\n"; } decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_MMX, size); break; case ud_operand_code.OP_Q: decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_MMX, size); break; case ud_operand_code.OP_P: decode_modrm_reg(ref u, ref operand, reg_class.REGCLASS_MMX, size); break; case ud_operand_code.OP_U: if (BitOps.MODRM_MOD(modrm(ref u)) != 3) { u.error = 1; u.errorMessage = "expected modrm.mod == 3\n"; } decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_XMM, size); break; case ud_operand_code.OP_W: decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_XMM, size); break; case ud_operand_code.OP_V: decode_modrm_reg(ref 
u, ref operand, reg_class.REGCLASS_XMM, size); break; case ud_operand_code.OP_H: decode_vex_vvvv(ref u, ref operand, size); break; case ud_operand_code.OP_MU: decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_XMM, BitOps.MODRM_MOD(modrm(ref u)) == 3 ? size.Mx_reg_size() : size.Mx_mem_size()); break; case ud_operand_code.OP_S: decode_modrm_reg(ref u, ref operand, reg_class.REGCLASS_SEG, size); break; case ud_operand_code.OP_O: decode_moffset(ref u, size, ref operand); break; case ud_operand_code.OP_R0: case ud_operand_code.OP_R1: case ud_operand_code.OP_R2: case ud_operand_code.OP_R3: case ud_operand_code.OP_R4: case ud_operand_code.OP_R5: case ud_operand_code.OP_R6: case ud_operand_code.OP_R7: decode_reg(ref u, ref operand, reg_class.REGCLASS_GPR, (byte)((BitOps.REX_B(u._rex) << 3) | (type - ud_operand_code.OP_R0)), size); break; case ud_operand_code.OP_AL: case ud_operand_code.OP_AX: case ud_operand_code.OP_eAX: case ud_operand_code.OP_rAX: decode_reg(ref u, ref operand, reg_class.REGCLASS_GPR, 0, size); break; case ud_operand_code.OP_CL: case ud_operand_code.OP_CX: case ud_operand_code.OP_eCX: decode_reg(ref u, ref operand, reg_class.REGCLASS_GPR, 1, size); break; case ud_operand_code.OP_DL: case ud_operand_code.OP_DX: case ud_operand_code.OP_eDX: decode_reg(ref u, ref operand, reg_class.REGCLASS_GPR, 2, size); break; case ud_operand_code.OP_ES: case ud_operand_code.OP_CS: case ud_operand_code.OP_DS: case ud_operand_code.OP_SS: case ud_operand_code.OP_FS: case ud_operand_code.OP_GS: /* in 64bits mode, only fs and gs are allowed */ if (u.dis_mode == 64) { if (type != ud_operand_code.OP_FS && type != ud_operand_code.OP_GS) { u.error = 1; u.errorMessage = "invalid segment register in 64bits\n"; } } operand.type = ud_type.UD_OP_REG; operand.@base = (type - ud_operand_code.OP_ES) + ud_type.UD_R_ES; operand.size = 16; break; case ud_operand_code.OP_J: decode_imm(ref u, size, ref operand); operand.type = ud_type.UD_OP_JIMM; break; case ud_operand_code.OP_R: if 
(BitOps.MODRM_MOD(modrm(ref u)) != 3) { u.error = 1; u.errorMessage = "expected modrm.mod == 3\n"; } decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_GPR, size); break; case ud_operand_code.OP_C: decode_modrm_reg(ref u, ref operand, reg_class.REGCLASS_CR, size); break; case ud_operand_code.OP_D: decode_modrm_reg(ref u, ref operand, reg_class.REGCLASS_DB, size); break; case ud_operand_code.OP_I3: operand.type = ud_type.UD_OP_CONST; operand.lval.@sbyte = 3; break; case ud_operand_code.OP_ST0: case ud_operand_code.OP_ST1: case ud_operand_code.OP_ST2: case ud_operand_code.OP_ST3: case ud_operand_code.OP_ST4: case ud_operand_code.OP_ST5: case ud_operand_code.OP_ST6: case ud_operand_code.OP_ST7: operand.type = ud_type.UD_OP_REG; operand.@base = (type - ud_operand_code.OP_ST0) + ud_type.UD_R_ST0; operand.size = 80; break; case ud_operand_code.OP_L: decode_vex_immreg(ref u, ref operand, size); break; default: operand.type = ud_type.UD_NONE; break; } return(operand.type); }
/* True when the operand is a register whose base falls in the segment
 * register range (ES .. GS). */
ud_opr_is_sreg(ref ud_operand opr)
{
    bool isRegister = opr.type == ud_type.UD_OP_REG;
    return (isRegister
            && opr.@base >= ud_type.UD_R_ES
            && opr.@base <= ud_type.UD_R_GS);
}
/* Picks an XMM/YMM register from the top nibble of the next immediate
 * byte (VEX /is4 encoding). Outside 64-bit mode only registers 0-7 are
 * reachable. Returns the error state (nonzero on input failure). */
decode_vex_immreg(ref ud u, ref ud_operand opr, ud_operand_size size)
{
    byte is4Byte = (byte)inp_next(ref u);
    if (u.error != 0) {
        return u.error;
    }
    Debug.Assert(u.vex_op != 0);
    byte nibbleMask = (byte)(u.dis_mode == 64 ? 0xf : 0x7);
    decode_reg(ref u, ref opr, reg_class.REGCLASS_XMM,
               (byte)(nibbleMask & (is4Byte >> 4)), size);
    return 0;
}
/* =============================================================================
 * ud_insn_opr
 * Return the operand struct representing the nth operand of
 * the currently disassembled instruction. Returns NULL if
 * there's no such operand.
 * =============================================================================
 */
/// <summary>
/// Return the operand struct representing the nth operand of
/// the currently disassembled instruction. Returns null (via the out
/// parameter) if there's no such operand.
/// </summary>
/// <param name="u">disassembler state</param>
/// <param name="n">operand index, 0..3</param>
/// <param name="op">receives a copy of the operand, or null</param>
public static void ud_insn_opr(ref ud u, int n, out ud_operand? op)
{
    /* BUGFIX: also reject negative indices - previously n < 0 threw an
     * IndexOutOfRangeException from u.operand[n] instead of yielding
     * null like any other out-of-range request. */
    if (n < 0 || n > 3 || u.operand[n].type == ud_type.UD_NONE) {
        op = null;
    } else {
        op = u.operand[n];
    }
}
/* decode_operand - decodes one operand according to the operand code
 * from the instruction tables: dispatches to the modrm reg/rm decoders,
 * immediate/relative readers, fixed-register helpers, far-pointer,
 * moffset, and VEX (vvvv / is4) decoders. Operand codes that constrain
 * modrm.mod (OP_F/OP_M require a memory form; OP_N/OP_U/OP_R require a
 * register form) set u.error with a message when violated, then decode
 * anyway. In 64-bit mode only FS/GS segment overrides are accepted.
 * Returns the decoded operand's type, UD_NONE for unknown codes. */
decode_operand(ref ud u, ref ud_operand operand, ud_operand_code type, ud_operand_size size) { operand.type = ud_type.UD_NONE; operand._oprcode = type; switch (type) { case ud_operand_code.OP_A: decode_a(ref u, ref operand); break; case ud_operand_code.OP_MR: decode_modrm_rm(ref u, ref operand, (byte)reg_class.REGCLASS_GPR, BitOps.MODRM_MOD(modrm(ref u)) == 3 ? size.Mx_reg_size() : size.Mx_mem_size()); break; case ud_operand_code.OP_F: u.br_far = 1; if (BitOps.MODRM_MOD(modrm(ref u)) == 3) { u.error = 1; u.errorMessage = "expected modrm.mod != 3\n"; } decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_GPR, size); break; case ud_operand_code.OP_M: if (BitOps.MODRM_MOD(modrm(ref u)) == 3) { u.error = 1; u.errorMessage = "expected modrm.mod != 3\n"; } decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_GPR, size); break; case ud_operand_code.OP_E: decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_GPR, size); break; case ud_operand_code.OP_G: decode_modrm_reg(ref u, ref operand, reg_class.REGCLASS_GPR, size); break; case ud_operand_code.OP_sI: case ud_operand_code.OP_I: decode_imm(ref u, size, ref operand); break; case ud_operand_code.OP_I1: operand.type = ud_type.UD_OP_CONST; operand.lval.udword = 1; break; case ud_operand_code.OP_N: if (BitOps.MODRM_MOD(modrm(ref u)) != 3) { u.error = 1; u.errorMessage = "expected modrm.mod == 3\n"; } decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_MMX, size); break; case ud_operand_code.OP_Q: decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_MMX, size); break; case ud_operand_code.OP_P: decode_modrm_reg(ref u, ref operand, reg_class.REGCLASS_MMX, size); break; case ud_operand_code.OP_U: if (BitOps.MODRM_MOD(modrm(ref u)) != 3) { u.error = 1; u.errorMessage = "expected modrm.mod == 3\n"; } decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_XMM, size); break; case ud_operand_code.OP_W: decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_XMM, size); break; case ud_operand_code.OP_V: decode_modrm_reg(ref 
u, ref operand, reg_class.REGCLASS_XMM, size); break; case ud_operand_code.OP_H: decode_vex_vvvv(ref u, ref operand, size); break; case ud_operand_code.OP_MU: decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_XMM, BitOps.MODRM_MOD(modrm(ref u)) == 3 ? size.Mx_reg_size() : size.Mx_mem_size()); break; case ud_operand_code.OP_S: decode_modrm_reg(ref u, ref operand, reg_class.REGCLASS_SEG, size); break; case ud_operand_code.OP_O: decode_moffset(ref u, size, ref operand); break; case ud_operand_code.OP_R0: case ud_operand_code.OP_R1: case ud_operand_code.OP_R2: case ud_operand_code.OP_R3: case ud_operand_code.OP_R4: case ud_operand_code.OP_R5: case ud_operand_code.OP_R6: case ud_operand_code.OP_R7: decode_reg(ref u, ref operand, reg_class.REGCLASS_GPR, (byte)((BitOps.REX_B(u._rex) << 3) | (type - ud_operand_code.OP_R0)), size); break; case ud_operand_code.OP_AL: case ud_operand_code.OP_AX: case ud_operand_code.OP_eAX: case ud_operand_code.OP_rAX: decode_reg(ref u, ref operand, reg_class.REGCLASS_GPR, 0, size); break; case ud_operand_code.OP_CL: case ud_operand_code.OP_CX: case ud_operand_code.OP_eCX: decode_reg(ref u, ref operand, reg_class.REGCLASS_GPR, 1, size); break; case ud_operand_code.OP_DL: case ud_operand_code.OP_DX: case ud_operand_code.OP_eDX: decode_reg(ref u, ref operand, reg_class.REGCLASS_GPR, 2, size); break; case ud_operand_code.OP_ES: case ud_operand_code.OP_CS: case ud_operand_code.OP_DS: case ud_operand_code.OP_SS: case ud_operand_code.OP_FS: case ud_operand_code.OP_GS: /* in 64bits mode, only fs and gs are allowed */ if (u.dis_mode == 64) { if (type != ud_operand_code.OP_FS && type != ud_operand_code.OP_GS) { u.error = 1; u.errorMessage = "invalid segment register in 64bits\n"; } } operand.type = ud_type.UD_OP_REG; operand.@base = (type - ud_operand_code.OP_ES) + ud_type.UD_R_ES; operand.size = 16; break; case ud_operand_code.OP_J: decode_imm(ref u, size, ref operand); operand.type = ud_type.UD_OP_JIMM; break; case ud_operand_code.OP_R: if 
(BitOps.MODRM_MOD(modrm(ref u)) != 3) { u.error = 1; u.errorMessage = "expected modrm.mod == 3\n"; } decode_modrm_rm(ref u, ref operand, reg_class.REGCLASS_GPR, size); break; case ud_operand_code.OP_C: decode_modrm_reg(ref u, ref operand, reg_class.REGCLASS_CR, size); break; case ud_operand_code.OP_D: decode_modrm_reg(ref u, ref operand, reg_class.REGCLASS_DB, size); break; case ud_operand_code.OP_I3: operand.type = ud_type.UD_OP_CONST; operand.lval.@sbyte = 3; break; case ud_operand_code.OP_ST0: case ud_operand_code.OP_ST1: case ud_operand_code.OP_ST2: case ud_operand_code.OP_ST3: case ud_operand_code.OP_ST4: case ud_operand_code.OP_ST5: case ud_operand_code.OP_ST6: case ud_operand_code.OP_ST7: operand.type = ud_type.UD_OP_REG; operand.@base = (type - ud_operand_code.OP_ST0) + ud_type.UD_R_ST0; operand.size = 80; break; case ud_operand_code.OP_L: decode_vex_immreg(ref u, ref operand, size); break; default: operand.type = ud_type.UD_NONE; break; } return operand.type; }
/* =============================================================================
 * ud_opr_is_sreg
 * Returns non-zero if the given operand is of a segment register type.
 * =============================================================================
 */
/// <summary>
/// Returns true if the given operand is of a segment register type
/// (base register in the ES..GS range).
/// </summary>
/// <param name="opr">operand to inspect</param>
/// <returns>true for segment register operands</returns>
public static bool ud_opr_is_sreg(ref ud_operand opr)
{
    if (opr.type != ud_type.UD_OP_REG) {
        return false;
    }
    return opr.@base >= ud_type.UD_R_ES && opr.@base <= ud_type.UD_R_GS;
}
/* Resolves a relative branch operand to its absolute target address:
 * sign-extends the stored displacement, adds the post-instruction pc,
 * and wraps the sum to the width of the current operand-size mode. */
public ulong ud_syn_rel_target(ref ud u, ref ud_operand opr)
{
    ulong wrap = 0xffffffffffffffff >> (64 - u.opr_mode);
    ulong displacement;
    switch (opr.size) {
    case 8:
        displacement = (ulong)opr.lval.@sbyte;
        break;
    case 16:
        displacement = (ulong)opr.lval.sword;
        break;
    case 32:
        displacement = (ulong)opr.lval.sdword;
        break;
    default:
        Debug.Assert(false, "invalid relative offset size.");
        return 0;
    }
    return (u.pc + displacement) & wrap;
}