def __dir__(self):
    if self.state.arch.name in ('X86', 'AMD64'):
        return list(self.state.arch.registers.keys()) + ['st%d' % n for n in range(8)] + ['tag%d' % n for n in range(8)] + ['flags', 'eflags', 'rflags']
    elif is_arm_arch(self.state.arch):
        return list(self.state.arch.registers.keys()) + ['flags']
    return list(self.state.arch.registers.keys())
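
This __dir__ hook is what makes tab completion on the register view list the synthetic st*/tag*/flags names alongside the architectural registers. A minimal sketch of it in action (the binary path is an assumption; any AMD64 ELF works):

import angr

proj = angr.Project('/bin/true', auto_load_libs=False)  # hypothetical sample binary
state = proj.factory.entry_state()
names = dir(state.regs)  # invokes the __dir__ above on the register view
assert 'rax' in names and 'st0' in names and 'rflags' in names
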
# Delayed import so SimMemory does not rely on SimEngines
from angr.engines.vex.claripy.ccall import _get_flags

if self.category == 'reg':
    if self.state.arch.name in ('X86', 'AMD64'):
        if name in stn_map:
            return (((stn_map[name] + self.load('ftop')) & 7) << 3) + self.state.arch.registers['fpu_regs'][0], 8
        elif name in tag_map:
            return ((tag_map[name] + self.load('ftop')) & 7) + self.state.arch.registers['fpu_tags'][0], 1
        elif name in ('flags', 'eflags', 'rflags'):
            # We tweak the state to convert the VEX condition registers into the flags register
            if not is_write:  # this work is unnecessary if we are about to overwrite the value anyway
                self.store('cc_dep1', _get_flags(self.state)[0])  # TODO: can constraints be added by this?
            self.store('cc_op', 0)  # OP_COPY
            return self.state.arch.registers['cc_dep1'][0], self.state.arch.bytes
    if is_arm_arch(self.state.arch):
        if name == 'flags':
            if not is_write:
                self.store('cc_dep1', _get_flags(self.state)[0])
            self.store('cc_op', 0)
            return self.state.arch.registers['cc_dep1'][0], self.state.arch.bytes
    return self.state.arch.registers[name]
elif name[0] == '*':
    return self.state.registers.load(name[1:]), None
else:
    raise SimMemoryError("Trying to address memory with a register name.")
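
Reading one of the synthetic flags names goes through the translation above: VEX keeps the status flags split across cc_op/cc_dep1/cc_dep2, and _get_flags() recombines them into a single value stored back into cc_dep1 with cc_op set to OP_COPY. A short sketch, reusing the state from the previous example:

flags = state.regs.eflags  # also reachable as 'rflags' on AMD64 or 'flags' on ARM
print(flags)               # a claripy bitvector, possibly symbolic
state.regs.eflags = 0x202  # a write skips _get_flags(), since is_write is True
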
def _set_return_address(self, state, ret_addr):
    """
    Set the return address of the current state to a specific address. We assume we are at the beginning of a
    function, or in other words, we are about to execute the very first instruction of the function.

    :param SimState state: The program state
    :param int ret_addr:   The return address
    :return:               None
    """

    # TODO: the following code is untested on anything other than X86 and AMD64. Don't freak out if you find bugs :)
    # TODO: Test it

    ret_bvv = state.solver.BVV(ret_addr, self.project.arch.bits)

    if self.project.arch.name in ('X86', 'AMD64'):
        state.stack_push(ret_bvv)
    elif is_arm_arch(self.project.arch):
        state.regs.lr = ret_bvv
    elif self.project.arch.name in ('MIPS32', 'MIPS64'):
        state.regs.ra = ret_bvv
    elif self.project.arch.name in ('PPC32', 'PPC64'):
        state.regs.lr = ret_bvv
    else:
        l.warning('Return address cannot be set for architecture %s. Please add corresponding logic to '
                  'VFG._set_return_address().', self.project.arch.name)
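
The stack-push branch can be reproduced by hand. A sketch assuming an AMD64 project (reusing proj from the first example), where func_addr stands in for a hypothetical function entry point:

state = proj.factory.blank_state(addr=func_addr)                # func_addr is hypothetical
state.stack_push(state.solver.BVV(0xdeadbeef, proj.arch.bits))  # fake return address
# on ARM/PPC the equivalent would be state.regs.lr = ...; on MIPS, state.regs.ra = ...
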
def _is_sane_register_variable(self, variable):
    """
    Check if the given register variable is an acceptable function argument on the current architecture.

    :param SimRegisterVariable variable: The variable to test.
    :return:                             True if it is an acceptable function argument, False otherwise.
    :rtype:                              bool
    """

    arch = self.project.arch

    if arch.name == 'AARCH64':
        return 16 <= variable.reg < 80  # x0-x7
    elif arch.name == 'AMD64':
        return (24 <= variable.reg < 40 or   # rcx, rdx
                64 <= variable.reg < 104 or  # rsi, rdi, r8, r9, r10
                224 <= variable.reg < 480)   # xmm0-xmm7
    elif is_arm_arch(arch):
        return 8 <= variable.reg < 24  # r0-r3
    elif arch.name == 'MIPS32':
        return 24 <= variable.reg < 40  # a0-a3
    elif arch.name == 'PPC32':
        return 28 <= variable.reg < 60  # r3-r10
    elif arch.name == 'X86':
        return (8 <= variable.reg < 24 or    # eax, ebx, ecx, edx
                160 <= variable.reg < 288)   # xmm0-xmm7
    else:
        l.critical('Unsupported architecture %s.', arch.name)
        return True
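
The magic numbers in this check are archinfo register-file offsets, so they can be verified directly against the arch definition. A small sketch:

import archinfo

arch = archinfo.ArchAMD64()
print(arch.registers['rcx'])  # (24, 8): offset 24, size 8 -> inside the 24 <= reg < 40 range
print(arch.registers['rsi'])  # (64, 8): start of the rsi/rdi/r8/r9/r10 range
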
        shift_amount = args[0]
        lam = lambda a, sl=shift_amount: a << sl
    else:
        raise NotImplementedError("Unsupported conversion operation.")
    invert_conversion_ops.append(lam)

all_targets_copy = all_targets
all_targets = []
for target_ in all_targets_copy:
    for lam in invert_conversion_ops:
        target_ = lam(target_)
    all_targets.append(target_)

mask = (2 ** self.project.arch.bits) - 1
all_targets = [(target + base_addr) & mask for target in all_targets]

# Special case for ARM: if the source block is in THUMB mode, all jump targets should be in THUMB mode, too
if is_arm_arch(self.project.arch) and (addr & 1) == 1:
    all_targets = [target | 1 for target in all_targets]

# Finally... all targets are ready
illegal_target_found = False
for target in all_targets:
    # If the total number of targets is suspicious (it usually implies a failure in applying the
    # constraints), check if all jump targets are legal
    if len(all_targets) in {0x100, 0x10000} and not self._is_jumptarget_legal(target):
        l.info("Jump target %#x is probably illegal. Try to resolve indirect jump at %#x from the next source.",
               target, addr)
        illegal_target_found = True
        break
    jump_table.append(target)

if illegal_target_found:
    return None
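
The masking step above implements modular wrap-around at the architecture's address width. A self-contained sketch of the arithmetic with made-up values:

bits = 32
mask = (2 ** bits) - 1                  # 0xffffffff
base_addr = 0x400000
displacement = -4 & mask                # a negative table entry in two's complement
assert (displacement + base_addr) & mask == 0x3ffffc  # wraps instead of overflowing
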
def _handle_CCall(self, expr):
    if not isinstance(expr.args[0], pyvex.IRExpr.Const):
        return
    cond_type_enum = expr.args[0].con.value

    if self.arch.name in ('X86', 'AMD64'):
        if cond_type_enum in EXPECTED_COND_TYPES[self.arch.name]:
            self._handle_Comparison(expr.args[2], expr.args[3])
    elif is_arm_arch(self.arch):
        if cond_type_enum in EXPECTED_COND_TYPES['ARM']:
            self._handle_Comparison(expr.args[2], expr.args[3])
    else:
        raise ValueError("Unexpected ccall encountered in architecture %s." % self.arch.name)
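
For context, the ccalls this handler inspects come from VEX's lazy flag computation. A sketch that lifts a compare-and-branch pair (cmp rax, rbx; je) to show one; the conditional exit's guard wraps a CCall to amd64g_calculate_condition, whose first argument is the condition-type constant checked above:

import pyvex
import archinfo

irsb = pyvex.lift(b'\x48\x39\xd8\x0f\x84\x00\x00\x00\x00', 0x400000, archinfo.ArchAMD64())
irsb.pp()  # the exit guard contains the condition ccall
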