# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
ctx.emit( lshl_ (prev_value, imm(part.size, 8), tmp0))
ctx.emit( add_ (part, tmp0, value))
return value
def vex_opnds(i):
    """Return the (dst, src1, src2) operand indices for *i*.

    Three-operand (VEX-encoded) forms carry an extra operand, which
    shifts the destination to index 2.
    """
    has_vex_operand = len(i.operands) == 3
    return (2, 1, 0) if has_vex_operand else (0, 1, 0)
# These SSE moves have plain move semantics at the REIL level, so they
# all reuse the generic mov translator from the memory module.
x86_movaps = memory.x86_mov
x86_movd = memory.x86_mov
x86_movdqa = memory.x86_mov
x86_vmovdqa = memory.x86_mov
x86_movdqu = memory.x86_mov
x86_vmovdqu = memory.x86_mov
x86_movups = memory.x86_mov
def x86_movhpd(ctx, i):
    """movhpd: write the 64-bit source into the high quadword of the
    128-bit destination, preserving the low quadword.

    Fix: the or_ merge and the operand.set write-back were missing, so
    the destination was never updated and the raw source was returned
    instead. Restored to match the intact x86_movhpd definitions.
    """
    a = operand.get(ctx, i, 0)      # destination operand
    value = operand.get(ctx, i, 1)  # 64-bit source

    tmp0 = ctx.tmp(a.size)
    tmp1 = ctx.tmp(a.size)

    # keep only the low quadword of the destination
    ctx.emit( and_ (a, imm(0x0000000000000000ffffffffffffffff, 128), tmp0))
    # widen the source and shift it into the high quadword
    ctx.emit( str_ (value, tmp1))
    ctx.emit( lshl_ (tmp1, imm(64, 8), tmp1))
    # merge the halves and write the result back
    ctx.emit( or_ (tmp0, tmp1, tmp0))
    operand.set(ctx, i, 0, tmp0)
def vex_opnds(i):
    """Pick operand indices, accounting for the extra VEX operand.

    Returns (dst, src1, src2) as indices into i.operands.
    """
    if len(i.operands) != 3:
        return 0, 1, 0
    # three-operand VEX encoding: destination moves to the last slot
    return 2, 1, 0
# SSE moves whose translation is identical to a plain mov: alias the
# generic translator rather than duplicating it.
x86_movaps = memory.x86_mov
x86_movd = memory.x86_mov
x86_movdqa = memory.x86_mov
x86_vmovdqa = memory.x86_mov
x86_movdqu = memory.x86_mov
x86_vmovdqu = memory.x86_mov
x86_movups = memory.x86_mov
def x86_movhpd(ctx, i):
    """movhpd: write the 64-bit source into the high quadword of the
    128-bit destination, preserving the low quadword.

    Fix: removed two stray emits referencing undefined names
    (prev_value, part) that would raise NameError, dropped the spurious
    return, and restored the operand.set write-back to match the intact
    x86_movhpd definitions.
    """
    a = operand.get(ctx, i, 0)      # destination operand
    value = operand.get(ctx, i, 1)  # 64-bit source

    tmp0 = ctx.tmp(a.size)
    tmp1 = ctx.tmp(a.size)

    # keep only the low quadword of the destination
    ctx.emit( and_ (a, imm(0x0000000000000000ffffffffffffffff, 128), tmp0))
    # widen the source and shift it into the high quadword
    ctx.emit( str_ (value, tmp1))
    ctx.emit( lshl_ (tmp1, imm(64, 8), tmp1))
    # merge the halves and write the result back
    ctx.emit( or_ (tmp0, tmp1, tmp0))
    operand.set(ctx, i, 0, tmp0)
def vex_opnds(i):
    """Return (dst, src1, src2) operand indices for instruction *i*.

    VEX-encoded forms have three operands, which places the destination
    at index 2; all other forms use index 0.
    """
    indices_by_count = {3: (2, 1, 0)}
    return indices_by_count.get(len(i.operands), (0, 1, 0))
# Aliases: these SSE move variants need no special handling beyond the
# generic mov translation.
x86_movaps = memory.x86_mov
x86_movd = memory.x86_mov
x86_movdqa = memory.x86_mov
x86_vmovdqa = memory.x86_mov
x86_movdqu = memory.x86_mov
x86_vmovdqu = memory.x86_mov
x86_movups = memory.x86_mov
def x86_movhpd(ctx, i):
    """movhpd: write the 64-bit source into the high quadword of the
    128-bit destination, preserving the low quadword.

    Fix: the lshl_ positioning, the or_ merge, and the operand.set
    write-back were all missing, so the destination was never modified.
    Restored to match the intact x86_movhpd definitions.
    """
    a = operand.get(ctx, i, 0)      # destination operand
    value = operand.get(ctx, i, 1)  # 64-bit source

    tmp0 = ctx.tmp(a.size)
    tmp1 = ctx.tmp(a.size)

    # keep only the low quadword of the destination
    ctx.emit( and_ (a, imm(0x0000000000000000ffffffffffffffff, 128), tmp0))
    # widen the source and shift it into the high quadword
    ctx.emit( str_ (value, tmp1))
    ctx.emit( lshl_ (tmp1, imm(64, 8), tmp1))
    # merge the halves and write the result back
    ctx.emit( or_ (tmp0, tmp1, tmp0))
    operand.set(ctx, i, 0, tmp0)
def vex_opnds(i):
    """Compute (dst, src1, src2) operand indices for *i*, shifting the
    destination to slot 2 when the extra VEX operand is present."""
    count = len(i.operands)
    if count == 3:
        return 2, 1, 0
    return 0, 1, 0
# These SSE moves translate exactly like a plain mov; reuse the shared
# translator from the memory module.
x86_movaps = memory.x86_mov
x86_movd = memory.x86_mov
x86_movdqa = memory.x86_mov
x86_vmovdqa = memory.x86_mov
x86_movdqu = memory.x86_mov
x86_vmovdqu = memory.x86_mov
x86_movups = memory.x86_mov
def x86_movhpd(ctx, i):
    """movhpd: write the 64-bit source into the high quadword of the
    128-bit destination, preserving the low quadword.

    Fix: removed a stray add_ emit referencing the undefined name
    `part` (a NameError at translation time) and the spurious return
    after the write-back. Matches the intact x86_movhpd definitions.
    """
    a = operand.get(ctx, i, 0)      # destination operand
    value = operand.get(ctx, i, 1)  # 64-bit source

    tmp0 = ctx.tmp(a.size)
    tmp1 = ctx.tmp(a.size)

    # keep only the low quadword of the destination
    ctx.emit( and_ (a, imm(0x0000000000000000ffffffffffffffff, 128), tmp0))
    # widen the source and shift it into the high quadword
    ctx.emit( str_ (value, tmp1))
    ctx.emit( lshl_ (tmp1, imm(64, 8), tmp1))
    # merge the halves and write the result back
    ctx.emit( or_ (tmp0, tmp1, tmp0))
    operand.set(ctx, i, 0, tmp0)
def vex_opnds(i):
    """Return the (dst, src1, src2) operand indices, handling the extra
    operand carried by three-operand VEX encodings."""
    vex_form = len(i.operands) == 3
    dst = 2 if vex_form else 0
    return dst, 1, 0
# SSE move variants with no extra semantics: all alias the generic
# mov translator.
x86_movaps = memory.x86_mov
x86_movd = memory.x86_mov
x86_movdqa = memory.x86_mov
x86_vmovdqa = memory.x86_mov
x86_movdqu = memory.x86_mov
x86_vmovdqu = memory.x86_mov
x86_movups = memory.x86_mov
def x86_movhpd(ctx, i):
    """movhpd: write the 64-bit source into the high quadword of the
    128-bit destination, preserving the low quadword.

    Fix: the merged result was computed but never written back (the
    final operand.set was missing), making the translation a no-op.
    Restored to match the intact x86_movhpd definitions.
    """
    a = operand.get(ctx, i, 0)      # destination operand
    value = operand.get(ctx, i, 1)  # 64-bit source

    tmp0 = ctx.tmp(a.size)
    tmp1 = ctx.tmp(a.size)

    # keep only the low quadword of the destination
    ctx.emit( and_ (a, imm(0x0000000000000000ffffffffffffffff, 128), tmp0))
    # widen the source and shift it into the high quadword
    ctx.emit( str_ (value, tmp1))
    ctx.emit( lshl_ (tmp1, imm(64, 8), tmp1))
    # merge the halves and write the result back
    ctx.emit( or_ (tmp0, tmp1, tmp0))
    operand.set(ctx, i, 0, tmp0)
def vex_opnds(i):
    """Select operand indices (dst, src1, src2) for *i*; VEX-encoded
    three-operand forms place the destination at index 2."""
    if len(i.operands) == 3:
        # additional VEX operand present
        dst_index = 2
    else:
        dst_index = 0
    return dst_index, 1, 0
# Plain-move SSE instructions: delegate to the shared mov translator
# instead of writing per-instruction handlers.
x86_movaps = memory.x86_mov
x86_movd = memory.x86_mov
x86_movdqa = memory.x86_mov
x86_vmovdqa = memory.x86_mov
x86_movdqu = memory.x86_mov
x86_vmovdqu = memory.x86_mov
x86_movups = memory.x86_mov
def x86_movhpd(ctx, i):
    """movhpd: replace the high quadword of the destination with the
    64-bit source, leaving the low quadword untouched."""
    dst = operand.get(ctx, i, 0)   # destination operand
    src = operand.get(ctx, i, 1)   # 64-bit source

    low_half = ctx.tmp(dst.size)
    high_half = ctx.tmp(dst.size)

    # mask the destination down to its low quadword
    ctx.emit( and_ (dst, imm(0x0000000000000000ffffffffffffffff, 128), low_half))
    # widen the source to destination width, then move it into the high half
    ctx.emit( str_ (src, high_half))
    ctx.emit( lshl_ (high_half, imm(64, 8), high_half))
    # combine both halves and store the result back
    ctx.emit( or_ (low_half, high_half, low_half))
    operand.set(ctx, i, 0, low_half)
def vex_opnds(i):
    """Return (dst, src1, src2) operand indices; the presence of the
    extra VEX operand (three operands total) moves dst to index 2."""
    return (2, 1, 0) if len(i.operands) == 3 else (0, 1, 0)
# These SSE moves require no translation of their own; each is an alias
# for the generic mov handler in the memory module.
x86_movaps = memory.x86_mov
x86_movd = memory.x86_mov
x86_movdqa = memory.x86_mov
x86_vmovdqa = memory.x86_mov
x86_movdqu = memory.x86_mov
x86_vmovdqu = memory.x86_mov
x86_movups = memory.x86_mov
def x86_movhpd(ctx, i):
    """movhpd: write the 64-bit source into the high quadword of the
    destination, preserving the low quadword.

    The 128-bit and_ mask implies the destination is an xmm-sized
    operand; the source is widened with str_ before being shifted up.
    """
    a = operand.get(ctx, i, 0)      # destination operand
    value = operand.get(ctx, i, 1)  # 64-bit source

    tmp0 = ctx.tmp(a.size)
    tmp1 = ctx.tmp(a.size)

    # keep only the low 64 bits of the destination
    ctx.emit( and_ (a, imm(0x0000000000000000ffffffffffffffff, 128), tmp0))
    # widen the source, then shift it into the high quadword
    ctx.emit( str_ (value, tmp1))
    ctx.emit( lshl_ (tmp1, imm(64, 8), tmp1))
    # merge the two halves and write the result back to operand 0
    ctx.emit( or_ (tmp0, tmp1, tmp0))
    operand.set(ctx, i, 0, tmp0)
# Privileged / system-level instructions are not modeled.
capstone.x86.X86_INS_LGDT: unsupported.privileged,
capstone.x86.X86_INS_LIDT: unsupported.privileged,
capstone.x86.X86_INS_LLDT: unsupported.privileged,
capstone.x86.X86_INS_LMSW: unsupported.privileged,
# String-load family, one handler per operand width.
capstone.x86.X86_INS_LODSB: memory.x86_lodsb,
capstone.x86.X86_INS_LODSD: memory.x86_lodsd,
capstone.x86.X86_INS_LODSQ: memory.x86_lodsq,
capstone.x86.X86_INS_LODSW: memory.x86_lodsw,
# Loop instructions and their flag-conditioned variants.
capstone.x86.X86_INS_LOOP: control_flow.x86_loop,
capstone.x86.X86_INS_LOOPE: control_flow.x86_loope,
capstone.x86.X86_INS_LOOPNE: control_flow.x86_loopne,
capstone.x86.X86_INS_LSL: unsupported.low_level,
capstone.x86.X86_INS_LTR: unsupported.privileged,
capstone.x86.X86_INS_LZCNT: bitwise.x86_lzcnt,
# Move family: plain, absolute, SSE, string, and extending variants.
capstone.x86.X86_INS_MOV: memory.x86_mov,
capstone.x86.X86_INS_MOVABS: memory.x86_movabs,
capstone.x86.X86_INS_MOVAPS: sse.x86_movaps,
capstone.x86.X86_INS_MOVD: sse.x86_movd,
capstone.x86.X86_INS_MOVDQA: sse.x86_movdqa,
capstone.x86.X86_INS_MOVDQU: sse.x86_movdqu,
capstone.x86.X86_INS_MOVHPD: sse.x86_movhpd,
capstone.x86.X86_INS_MOVLPD: sse.x86_movlpd,
capstone.x86.X86_INS_MOVQ: sse.x86_movq,
capstone.x86.X86_INS_MOVSB: memory.x86_movsb,
capstone.x86.X86_INS_MOVSD: memory.x86_movsd,
capstone.x86.X86_INS_MOVSQ: memory.x86_movsq,
capstone.x86.X86_INS_MOVSW: memory.x86_movsw,
# MOVSX and MOVSXD share a sign-extending handler.
capstone.x86.X86_INS_MOVSX: memory.x86_movsx,
capstone.x86.X86_INS_MOVSXD: memory.x86_movsx,
capstone.x86.X86_INS_MOVUPS: sse.x86_movups,
capstone.x86.X86_INS_MOVZX: memory.x86_movzx,