# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): excerpt from a parallel-gufunc lowering routine; *ufunc*,
# *context*, *lowerer*, *expr_args*, *outer_sig*, *gu_signature*, *sin*,
# *sout* and the helper functions used below are defined outside this
# excerpt. Indentation appears to have been flattened in this copy.
print("_sigs = ", ufunc._sigs)
# Only the first registered signature is used — presumably the ufunc was
# compiled for exactly one signature here; TODO confirm against caller.
sig = ufunc._sigs[0]
# Cached compilation result for that signature.
cres = ufunc._cres[sig]
#dtypenums, wrapper, env = ufunc.build(cres, sig)
# Start the parallel runtime before building/using the wrapper.
_launch_threads()
_init()
llvm_func = cres.library.get_function(cres.fndesc.llvm_func_name)
# Build the gufunc wrapper around the compiled kernel function.
wrapper_ptr, env, wrapper_name = build_gufunc_wrapper(llvm_func, cres, sin, sout, {})
cres.library._ensure_finalized()
if config.DEBUG_ARRAY_OPT:
print("parallel function = ", wrapper_name, cres, sig)
# Commonly used LLVM types and constants.
byte_t = lc.Type.int(8)
byte_ptr_t = lc.Type.pointer(byte_t)
byte_ptr_ptr_t = lc.Type.pointer(byte_ptr_t)
intp_t = context.get_value_type(types.intp)
uintp_t = context.get_value_type(types.uintp)
intp_ptr_t = lc.Type.pointer(intp_t)
zero = context.get_constant(types.intp, 0)
one = context.get_constant(types.intp, 1)
sizeof_intp = context.get_abi_sizeof(intp_t)
# prepare sched, first pop it out of expr_args, outer_sig, and gu_signature
sched_name = expr_args.pop(0)
sched_typ = outer_sig.args[0]
# Signature with the leading sched argument stripped off.
_outer_sig = signature(types.none, *(outer_sig.args[1:]))
sched_sig = sin.pop(0)
# prepare input/output arguments
inputs, output, out_ty = _prepare_arguments(lowerer, gu_signature, _outer_sig, expr_args)
# call do_scheduling with appropriate arguments
# NOTE(review): excerpt of list-unboxing lowering; *c*, *errorptr*, *typ*,
# *size*, *type_mismatch*, *expected_typobj*, *typobj*, *obj* and *loop*
# come from the surrounding (not shown) function. Indentation appears to
# have been flattened in this copy.
with c.builder.if_then(type_mismatch, likely=False):
c.builder.store(cgutils.true_bit, errorptr)
c.pyapi.err_format(
"PyExc_TypeError",
"can't unbox heterogeneous list: %S != %S",
expected_typobj, typobj,
)
# Drop the borrowed typobj reference before breaking out of the loop.
c.pyapi.decref(typobj)
loop.do_break()
c.pyapi.decref(typobj)
# Allocate a new native list
ok, list = listobj.ListInstance.allocate_ex(c.context, c.builder, typ, size)
# Array getitem call
arr_get_fnty = LLType.function(LLType.pointer(c.pyapi.pyobj), [c.pyapi.pyobj, c.pyapi.py_ssize_t])
arr_get_fn = c.pyapi._get_function(arr_get_fnty, name="array_getptr1")
with c.builder.if_else(ok, likely=True) as (if_ok, if_not_ok):
with if_ok:
list.size = size
zero = lir.Constant(size.type, 0)
# Only traverse elements when the list is non-empty.
with c.builder.if_then(c.builder.icmp_signed('>', size, zero),
likely=True):
# Traverse Python list and unbox objects into native list
with _NumbaTypeHelper(c) as nth:
# Note: *expected_typobj* can't be NULL
# TODO: enable type checking when empty list item in
# list(list(str)) case can be handled
# expected_typobj = nth.typeof(c.builder.load(
# c.builder.call(arr_get_fn, [obj, zero])))
with cgutils.for_range(c.builder, size) as loop:
def partitionedlist_len(context, builder, sig, args):
    """Lower ``len()`` of a partitioned list.

    Reads the 64-bit entry at ``offsets[numpartitions]`` (computed via
    pointer arithmetic: base address + numpartitions * 8 bytes) and loads
    it as the list's length.
    """
    (list_type,) = sig.args
    (list_value,) = args
    proxy = numba.cgutils.create_struct_proxy(list_type)(
        context, builder, value=list_value)
    i64_ty = llvmlite.llvmpy.core.Type.int(64)
    # Address of offsets[numpartitions]: base + numpartitions * sizeof(i64).
    base_addr = builder.ptrtoint(proxy.offsets, i64_ty)
    byte_offset = builder.mul(proxy.numpartitions, literal_int64(8))
    elem_ptr_ty = llvmlite.llvmpy.core.Type.pointer(
        context.get_value_type(numba.types.int64))
    elem_ptr = builder.inttoptr(builder.add(base_addr, byte_offset),
                                elem_ptr_ty)
    return numba.targets.arrayobj.load_item(
        context, builder, numba.types.int64[:], elem_ptr)
# NOTE(review): excerpt of call_parallel_gufunc loop-range setup; *start*,
# *stop*, *step*, *i*, *loop_ranges*, *load_range*, *builder*, *context*,
# *expr_args*, *sin* and *index_var_typ* are defined outside this excerpt.
start = load_range(start)
stop = load_range(stop)
assert(step == 1) # We do not support loop steps other than 1
step = load_range(step)
loop_ranges[i] = (start, stop, step)
if config.DEBUG_ARRAY_OPT:
print("call_parallel_gufunc loop_ranges[{}] = ".format(i), start,
stop, step)
# Emits a runtime (generated-code) printf of the range, not a compile-time
# print.
cgutils.printf(builder, "loop range[{}]: %d %d (%d)\n".format(i),
start, stop, step)
# Commonly used LLVM types and constants
byte_t = lc.Type.int(8)
byte_ptr_t = lc.Type.pointer(byte_t)
byte_ptr_ptr_t = lc.Type.pointer(byte_ptr_t)
intp_t = context.get_value_type(types.intp)
uintp_t = context.get_value_type(types.uintp)
intp_ptr_t = lc.Type.pointer(intp_t)
uintp_ptr_t = lc.Type.pointer(uintp_t)
zero = context.get_constant(types.uintp, 0)
one = context.get_constant(types.uintp, 1)
one_type = one.type
sizeof_intp = context.get_abi_sizeof(intp_t)
# Prepare sched, first pop it out of expr_args, outer_sig, and gu_signature
expr_args.pop(0)
sched_sig = sin.pop(0)
if config.DEBUG_ARRAY_OPT:
print("Parfor has potentially negative start", index_var_typ.signed)
def declare_string(builder, value):
    """Materialize *value* as a NUL-terminated string constant in the CUDA
    constant address space of the current module, and return a pointer to
    it converted to the generic address space.
    """
    module = builder.basic_block.function.module
    text_const = lc.Constant.stringz(value)
    # Emit the bytes as an internal constant global in constant addrspace.
    gv = module.add_global_variable(text_const.type, name="_str",
                                    addrspace=nvvm.ADDRSPACE_CONSTANT)
    gv.linkage = lc.LINKAGE_INTERNAL
    gv.global_constant = True
    gv.initializer = text_const
    char_ty = lc.Type.int(8)
    const_char_ptr_ty = lc.Type.pointer(char_ty, nvvm.ADDRSPACE_CONSTANT)
    as_char_ptr = builder.bitcast(gv, const_char_ptr_ty)
    # Convert the constant-space pointer to a generic pointer for callers.
    conv_fn = insert_addrspace_conv(module, char_ty, nvvm.ADDRSPACE_CONSTANT)
    return builder.call(conv_fn, [as_char_ptr])
def declare_atomic_max_float32(lmod):
    """Declare (or fetch, if already present) the float32 atomic-max helper
    ``___numba_atomic_float_max`` in module *lmod* and return it.

    Signature: float (float*, float).
    """
    float_ty = lc.Type.float()
    fnty = lc.Type.function(float_ty, (lc.Type.pointer(float_ty), float_ty))
    return lmod.get_or_insert_function(fnty, '___numba_atomic_float_max')
import sys
import ctypes
import struct as struct_
from llvmlite.llvmpy.core import Type, Constant
# True on Py_TRACE_REFS (debug) builds of CPython, which add
# _ob_next/_ob_prev links to every object header.
_trace_refs_ = hasattr(sys, 'getobjects')
# Pointer width of the host platform, in bits.
_plat_bits = struct_.calcsize('@P') * 8
_int8 = Type.int(8)
_int32 = Type.int(32)
_void_star = Type.pointer(_int8)
_int8_star = _void_star
# NOTE(review): uses c_size_t as a stand-in for Py_ssize_t — the two have
# the same size on supported platforms, though c_ssize_t is the literal
# counterpart.
_sizeof_py_ssize_t = ctypes.sizeof(getattr(ctypes, 'c_size_t'))
_llvm_py_ssize_t = Type.int(_sizeof_py_ssize_t * 8)
if _trace_refs_:
# PyObject header layout for trace-refs builds:
# {_ob_next, _ob_prev, ob_refcnt, ob_type}
_pyobject_head = Type.struct([_void_star, _void_star,
_llvm_py_ssize_t, _void_star])
# Header initializer with refcount 1 and NULL links/type.
_pyobject_head_init = Constant.struct([
Constant.null(_void_star), # _ob_next
Constant.null(_void_star), # _ob_prev
Constant.int(_llvm_py_ssize_t, 1), # ob_refcnt
Constant.null(_void_star), # ob_type
])
import llvmlite.binding as ll
from llvmlite import ir
from numba.targets.imputils import Registry
from numba import cgutils
from numba import types
from numba.itanium_mangler import mangle_c, mangle, mangle_type
from . import target
from . import stubs
from . import hlc
from . import enums
# Registry collecting the lowering implementations defined in this module.
registry = Registry()
# Decorator used below to register lowering functions.
lower = registry.lower
# Null i8* constant, used as a generic void-pointer placeholder value.
_void_value = lc.Constant.null(lc.Type.pointer(lc.Type.int(8)))
# -----------------------------------------------------------------------------
# NOTE(review): this definition looks spliced together from two different
# sources — the docstring begins as an OpenCL builtin-declaration helper
# but drifts into thread-count notes, and the body below builds a parallel
# gufunc wrapper kernel (using *info*/*ctx*, which are not parameters).
# Reconcile with the original source files before relying on it.
def _declare_function(context, builder, name, sig, cargs,
mangler=mangle_c):
"""Insert declaration for a opencl builtin function.
Uses the Itanium mangler.
Args
----
context: target context
builder: llvm builder
name: str
domain partitioning.
NOTE: The execution backend is passed the requested thread count, but it can
choose to ignore it (TBB)!
"""
assert isinstance(info, tuple) # guard against old usage
# Declare types and function
byte_t = lc.Type.int(8)
byte_ptr_t = lc.Type.pointer(byte_t)
byte_ptr_ptr_t = lc.Type.pointer(byte_ptr_t)
intp_t = ctx.get_value_type(types.intp)
intp_ptr_t = lc.Type.pointer(intp_t)
# NumPy gufunc kernel signature:
# void (char **args, intp *dimensions, intp *steps, char *data)
fnty = lc.Type.function(lc.Type.void(), [lc.Type.pointer(byte_ptr_t),
lc.Type.pointer(intp_t),
lc.Type.pointer(intp_t),
byte_ptr_t])
wrapperlib = ctx.codegen().create_library('parallelgufuncwrapper')
mod = wrapperlib.create_ir_module('parallel.gufunc.wrapper')
# Kernel name made unique via the identity of the environment object.
kernel_name = ".kernel.{}_{}".format(id(info.env), info.name)
lfunc = mod.add_function(fnty, name=kernel_name)
bb_entry = lfunc.append_basic_block('')
# Function body starts
builder = lc.Builder(bb_entry)
args, dimensions, steps, data = lfunc.args
# Release the GIL (and ensure we have the GIL)
# Note: numpy ufunc may not always release the GIL; thus,