# NOTE(review): the line below was a scraped Snyk banner pasted as bare text,
# which is a syntax error in Python; kept here as a comment only.
# "Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately."
def _perform_vex_stmt_PutI_compute(self, base, elemTy, bias, ix, nElems):
    """Build the VEX expression base + ((bias + ix) % nElems) * elemSize.

    This is the guest-state byte offset addressed by a PutI statement:
    ``ix`` is an expression already lowered by the engine, while ``base``,
    ``bias`` and ``nElems`` are wrapped as U32 constants. The modulo is
    realised via a 64/32 unsigned divmod whose remainder sits in the high
    half of the 64-bit result.
    """
    elem_size = self._ty_to_bytes(elemTy)

    def u32(value):
        # Wrap a plain value as a U32 constant expression.
        return self._handle_vex_const(pyvex.const.U32(value))

    op = self._perform_vex_expr_Op
    raw_index = op('Iop_Add32', (u32(bias), ix))
    wide_index = op('Iop_32HLto64', (u32(0), raw_index))
    div_mod = op('Iop_DivModU64to32', (wide_index, u32(nElems)))
    wrapped_index = op('Iop_64HIto32', (div_mod,))  # remainder = high half
    byte_offset = op('Iop_Mul32', (wrapped_index, u32(elem_size)))
    return op('Iop_Add32', (u32(base), byte_offset))
def _perform_vex_stmt_PutI(self, base, elemSize, bias, ix, nElems, data):
def _perform_vex_stmt_PutI_compute(self, base, elemTy, bias, ix, nElems):
    """Build VEX ops computing base + ((bias + ix) % nElems) * elemSize.

    The result is the guest-state byte offset addressed by a PutI
    statement; ``ix`` is an already-lowered expression, while ``base``,
    ``bias`` and ``nElems`` are wrapped as U32 constants here.
    """
    # base + ((bias + ix) % nElems) * elemSize
    elemSize = self._ty_to_bytes(elemTy)
    index = self._perform_vex_expr_Op('Iop_Add32', (self._handle_vex_const(pyvex.const.U32(bias)), ix))
    # Zero-extend to 64 bits so the 64/32 unsigned divmod below can be used.
    big_index = self._perform_vex_expr_Op('Iop_32HLto64', (self._handle_vex_const(pyvex.const.U32(0)), index))
    divmod_index = self._perform_vex_expr_Op('Iop_DivModU64to32', (big_index, self._handle_vex_const(pyvex.const.U32(nElems))))
    # DivModU64to32 packs the remainder into the high 32 bits; extract it.
    mod_index = self._perform_vex_expr_Op('Iop_64HIto32', (divmod_index,))
    offset = self._perform_vex_expr_Op('Iop_Mul32', (mod_index, self._handle_vex_const(pyvex.const.U32(elemSize))))
    return self._perform_vex_expr_Op('Iop_Add32', (self._handle_vex_const(pyvex.const.U32(base)), offset))
def _perform_vex_stmt_PutI(self, base, elemSize, bias, ix, nElems, data):
def _handle_vex_stmt_Put(self, stmt):
    """Dispatch a VEX Put statement to _perform_vex_stmt_Put.

    The register offset is wrapped as a U32 constant expression and the
    statement's data is passed through _analyze_vex_stmt_Put_data
    (presumably lowering it to an engine value — confirm against the
    rest of the class, which is not visible here).
    """
    self._perform_vex_stmt_Put(
        self._handle_vex_const(pyvex.const.U32(stmt.offset)),
        self._analyze_vex_stmt_Put_data(stmt.data))
def _perform_vex_stmt_Put(self, offset, data, **kwargs):
def _perform_vex_stmt_PutI_compute(self, base, elemTy, bias, ix, nElems):
    """Build the VEX expression base + ((bias + ix) % nElems) * elemSize.

    This is the guest-state byte offset addressed by a PutI statement:
    ``ix`` is an expression already lowered by the engine, while ``base``,
    ``bias`` and ``nElems`` are wrapped as U32 constants. The modulo is
    realised via a 64/32 unsigned divmod whose remainder sits in the high
    half of the 64-bit result.
    """
    elem_size = self._ty_to_bytes(elemTy)

    def u32(value):
        # Wrap a plain value as a U32 constant expression.
        return self._handle_vex_const(pyvex.const.U32(value))

    op = self._perform_vex_expr_Op
    raw_index = op('Iop_Add32', (u32(bias), ix))
    wide_index = op('Iop_32HLto64', (u32(0), raw_index))
    div_mod = op('Iop_DivModU64to32', (wide_index, u32(nElems)))
    wrapped_index = op('Iop_64HIto32', (div_mod,))  # remainder = high half
    byte_offset = op('Iop_Mul32', (wrapped_index, u32(elem_size)))
    return op('Iop_Add32', (u32(base), byte_offset))
def _perform_vex_stmt_PutI(self, base, elemSize, bias, ix, nElems, data):
def _perform_vex_stmt_PutI_compute(self, base, elemTy, bias, ix, nElems):
    """Build VEX ops computing base + ((bias + ix) % nElems) * elemSize.

    The result is the guest-state byte offset addressed by a PutI
    statement; ``ix`` is an already-lowered expression, while ``base``,
    ``bias`` and ``nElems`` are wrapped as U32 constants here.
    """
    # base + ((bias + ix) % nElems) * elemSize
    elemSize = self._ty_to_bytes(elemTy)
    index = self._perform_vex_expr_Op('Iop_Add32', (self._handle_vex_const(pyvex.const.U32(bias)), ix))
    # Zero-extend to 64 bits so the 64/32 unsigned divmod below can be used.
    big_index = self._perform_vex_expr_Op('Iop_32HLto64', (self._handle_vex_const(pyvex.const.U32(0)), index))
    divmod_index = self._perform_vex_expr_Op('Iop_DivModU64to32', (big_index, self._handle_vex_const(pyvex.const.U32(nElems))))
    # DivModU64to32 packs the remainder into the high 32 bits; extract it.
    mod_index = self._perform_vex_expr_Op('Iop_64HIto32', (divmod_index,))
    offset = self._perform_vex_expr_Op('Iop_Mul32', (mod_index, self._handle_vex_const(pyvex.const.U32(elemSize))))
    return self._perform_vex_expr_Op('Iop_Add32', (self._handle_vex_const(pyvex.const.U32(base)), offset))
def _perform_vex_stmt_PutI(self, base, elemSize, bias, ix, nElems, data):
def _perform_vex_stmt_PutI_compute(self, base, elemTy, bias, ix, nElems):
    """Build VEX ops computing base + ((bias + ix) % nElems) * elemSize.

    The result is the guest-state byte offset addressed by a PutI
    statement; ``ix`` is an already-lowered expression, while ``base``,
    ``bias`` and ``nElems`` are wrapped as U32 constants here.
    """
    # base + ((bias + ix) % nElems) * elemSize
    elemSize = self._ty_to_bytes(elemTy)
    index = self._perform_vex_expr_Op('Iop_Add32', (self._handle_vex_const(pyvex.const.U32(bias)), ix))
    # Zero-extend to 64 bits so the 64/32 unsigned divmod below can be used.
    big_index = self._perform_vex_expr_Op('Iop_32HLto64', (self._handle_vex_const(pyvex.const.U32(0)), index))
    divmod_index = self._perform_vex_expr_Op('Iop_DivModU64to32', (big_index, self._handle_vex_const(pyvex.const.U32(nElems))))
    # DivModU64to32 packs the remainder into the high 32 bits; extract it.
    mod_index = self._perform_vex_expr_Op('Iop_64HIto32', (divmod_index,))
    offset = self._perform_vex_expr_Op('Iop_Mul32', (mod_index, self._handle_vex_const(pyvex.const.U32(elemSize))))
    return self._perform_vex_expr_Op('Iop_Add32', (self._handle_vex_const(pyvex.const.U32(base)), offset))
def _perform_vex_stmt_PutI(self, base, elemSize, bias, ix, nElems, data):