How to use llvmlite - 10 common examples

To help you get started, we’ve selected a few llvmlite examples based on popular ways it is used in public projects.
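
Almost every snippet below follows the same core llvmlite pattern: create an ir.Module, declare an ir.Function from an ir.FunctionType, append a basic block, and emit instructions through an ir.IRBuilder. Here is a minimal, self-contained sketch of that pattern; the module and function names are placeholders rather than code from any of the projects below.

import llvmlite.ir as ir

# Build a module and declare a function: double add(double, double)
module = ir.Module(name="example")
dbl = ir.DoubleType()
fnty = ir.FunctionType(dbl, (dbl, dbl))
func = ir.Function(module, fnty, name="add")

# Emit the body through an IRBuilder positioned in the entry block
block = func.append_basic_block(name="entry")
builder = ir.IRBuilder(block)
a, b = func.args
builder.ret(builder.fadd(a, b, name="sum"))

# The module now prints as textual LLVM IR
print(module)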


From diana-hep/oamap, oamap/compiler.py:
def arrayitem(context, builder, idx, ptrs, lens, at, dtype):
        offset = builder.mul(idx, literal_int64(numba.types.intp.bitwidth // 8))

        ptrposition = builder.inttoptr(
            builder.add(builder.ptrtoint(ptrs, llvmlite.llvmpy.core.Type.int(numba.types.intp.bitwidth)), offset),
            llvmlite.llvmpy.core.Type.pointer(context.get_value_type(numba.types.intp)))

        lenposition = builder.inttoptr(
            builder.add(builder.ptrtoint(lens, llvmlite.llvmpy.core.Type.int(numba.types.intp.bitwidth)), offset),
            llvmlite.llvmpy.core.Type.pointer(context.get_value_type(numba.types.intp)))

        ptr = numba.targets.arrayobj.load_item(context, builder, numba.types.intp[:], ptrposition)
        len = numba.targets.arrayobj.load_item(context, builder, numba.types.intp[:], lenposition)

        raise_exception(context, builder, builder.icmp_unsigned(">=", at, len), RuntimeError("array index out of range"))

        finalptr = builder.inttoptr(
            builder.add(ptr, builder.mul(at, literal_int64(dtype.itemsize))),
            llvmlite.llvmpy.core.Type.pointer(context.get_value_type(numba.from_dtype(dtype))))

        return numba.targets.arrayobj.load_item(context, builder, numba.from_dtype(dtype)[:], finalptr)
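
This lowering helper computes element addresses by hand with ptrtoint/add/inttoptr and goes through the legacy llvmlite.llvmpy.core type wrappers, which newer llvmlite releases have deprecated in favor of the llvmlite.ir API. Below is a rough sketch of the same address arithmetic using plain llvmlite.ir; the standalone function, the names, and the 64-bit word size are assumptions for illustration, and inside Numba you would keep using the context and builder objects Numba passes in (builder.gep is also the more idiomatic way to index).

import llvmlite.ir as ir

i64 = ir.IntType(64)
i64ptr = i64.as_pointer()

# load_at(base, idx) -> base[idx], spelled as ptrtoint/add/inttoptr as above
module = ir.Module(name="ptr_arith")
fnty = ir.FunctionType(i64, (i64ptr, i64))
func = ir.Function(module, fnty, name="load_at")
builder = ir.IRBuilder(func.append_basic_block(name="entry"))

base, idx = func.args
offset = builder.mul(idx, i64(8))                        # idx * sizeof(int64)
addr = builder.add(builder.ptrtoint(base, i64), offset)  # base + byte offset
elem_ptr = builder.inttoptr(addr, i64ptr)
builder.ret(builder.load(elem_ptr))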

From PrincetonUniversity/PsyNeuLink, tests/llvm/test_helpers.py:
def test_helper_all_close(mode):

    with pnlvm.LLVMBuilderContext() as ctx:
        arr_ptr_ty = ir.ArrayType(ctx.float_ty, DIM_X).as_pointer()
        func_ty = ir.FunctionType(ir.VoidType(), [arr_ptr_ty, arr_ptr_ty,
                                                  ir.IntType(32).as_pointer()])

        custom_name = ctx.get_unique_name("all_close")
        function = ir.Function(ctx.module, func_ty, name=custom_name)
        in1, in2, out = function.args
        block = function.append_basic_block(name="entry")
        builder = ir.IRBuilder(block)

        all_close = pnlvm.helpers.all_close(builder, in1, in2)
        res = builder.select(all_close, out.type.pointee(1), out.type.pointee(0))
        builder.store(res, out)
        builder.ret_void()

    vec1 = copy.deepcopy(VECTOR)
    vec2 = copy.deepcopy(VECTOR)

    ref = np.allclose(vec1, vec2)
    bin_f = pnlvm.LLVMBinaryFunction.get(custom_name)
    if mode == 'CPU':
        ct_ty = pnlvm._convert_llvm_ir_to_ctype(arr_ptr_ty)
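
pnlvm.LLVMBuilderContext and pnlvm.LLVMBinaryFunction are PsyNeuLink wrappers; underneath, IR built with llvmlite.ir still has to be compiled through llvmlite.binding before it can be called from Python. A minimal sketch of that lower-level path, independent of PsyNeuLink (the add function and its signature are placeholders):

import ctypes
import llvmlite.ir as ir
import llvmlite.binding as llvm

# One-time LLVM initialization for native code generation
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()

# Build a trivial module: double add(double, double)
dbl = ir.DoubleType()
module = ir.Module(name="jit_example")
fn = ir.Function(module, ir.FunctionType(dbl, (dbl, dbl)), name="add")
builder = ir.IRBuilder(fn.append_basic_block("entry"))
builder.ret(builder.fadd(*fn.args))

# Compile with MCJIT and call the native function through ctypes
target_machine = llvm.Target.from_default_triple().create_target_machine()
engine = llvm.create_mcjit_compiler(llvm.parse_assembly(str(module)), target_machine)
engine.finalize_object()

addr = engine.get_function_address("add")
cfunc = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double, ctypes.c_double)(addr)
print(cfunc(1.5, 2.5))  # 4.0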

From PrincetonUniversity/PsyNeuLink, tests/llvm/test_builtins_matrix.py:
def test_dot_transposed_llvm_constant_dim(benchmark, mode):
    custom_name = None

    with pnlvm.LLVMBuilderContext() as ctx:
        custom_name = ctx.get_unique_name("vxsqm")
        double_ptr_ty = ctx.float_ty.as_pointer()
        func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, double_ptr_ty, double_ptr_ty))

        # get builtin IR
        builtin = ctx.import_llvm_function("__pnl_builtin_vxm_transposed")

        # Create square vector matrix multiply
        function = ir.Function(ctx.module, func_ty, name=custom_name)
        _x = ctx.int32_ty(DIM_X)
        _y = ctx.int32_ty(DIM_Y)
        _v, _m, _o = function.args
        block = function.append_basic_block(name="entry")
        builder = ir.IRBuilder(block)
        builder.call(builtin, [_v, _m, _x, _y, _o])
        builder.ret_void()

    binf2 = pnlvm.LLVMBinaryFunction.get(custom_name)
    if mode == 'CPU':
        benchmark(binf2, ct_tvec, ct_u, ct_tvec_res)
    else:
        import pycuda
        cuda_vec = pycuda.driver.In(trans_vector)
        cuda_mat = pycuda.driver.In(u)
        cuda_res = pycuda.driver.Out(llvm_tvec_res)
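
The test above wraps a prebuilt builtin: it declares a new function whose only job is to call __pnl_builtin_vxm_transposed with the matrix dimensions baked in as compile-time constants. The same wrapping pattern in plain llvmlite is sketched below; the vxm signature and the dimension 4 are assumptions for illustration, not PsyNeuLink's actual builtin.

import llvmlite.ir as ir

i32 = ir.IntType(32)
dbl_ptr = ir.DoubleType().as_pointer()

module = ir.Module(name="wrapper_example")

# The function being wrapped: void vxm(double*, double*, i32, i32, double*)
vxm_ty = ir.FunctionType(ir.VoidType(), (dbl_ptr, dbl_ptr, i32, i32, dbl_ptr))
vxm = ir.Function(module, vxm_ty, name="vxm")

# The wrapper fixes both dimensions to the constant 4
wrap_ty = ir.FunctionType(ir.VoidType(), (dbl_ptr, dbl_ptr, dbl_ptr))
wrapper = ir.Function(module, wrap_ty, name="vxsqm")
builder = ir.IRBuilder(wrapper.append_basic_block("entry"))
v, m, o = wrapper.args
builder.call(vxm, [v, m, i32(4), i32(4), o])
builder.ret_void()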

From PrincetonUniversity/PsyNeuLink, tests/llvm/test_custom_func.py:
        binf.c_func(ct_vec, ct_mat, x, y, ct_res)
    else:
        binf.cuda_wrap_call(vector, matrix, np.int32(x), np.int32(y), orig_res)

    custom_name = None

    with pnlvm.LLVMBuilderContext() as ctx:
        custom_name = ctx.get_unique_name("vxsqm")
        double_ptr_ty = ctx.convert_python_struct_to_llvm_ir(1.0).as_pointer()
        func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, double_ptr_ty, double_ptr_ty))

        # get builtin IR
        builtin = ctx.import_llvm_function("__pnl_builtin_vxm")

        # Create square vector matrix multiply
        function = ir.Function(ctx.module, func_ty, name=custom_name)
        _x = ctx.int32_ty(x)
        _v, _m, _o = function.args
        block = function.append_basic_block(name="entry")
        builder = ir.IRBuilder(block)
        builder.call(builtin, [_v, _m, _x, _x, _o])
        builder.ret_void()

    binf2 = pnlvm.LLVMBinaryFunction.get(custom_name)
    new_res = np.empty_like(llvm_res)

    if mode == 'CPU':
        ct_res = new_res.ctypes.data_as(ctypes.POINTER(ct_res_ty))

        binf2(ct_vec, ct_mat, ct_res)
    else:
        binf2.cuda_wrap_call(vector, matrix, new_res)
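
One detail worth noting in this snippet is how the NumPy result buffer reaches the compiled code: new_res.ctypes.data_as(...) reinterprets the array's memory as a raw C pointer that the LLVM function can write into. A small illustration of that hand-off; the compiled callable cfunc is hypothetical.

import ctypes
import numpy as np

vec = np.arange(4, dtype=np.float64)
out = np.empty_like(vec)

# View both NumPy buffers as double* for the native call
dbl_p = ctypes.POINTER(ctypes.c_double)
vec_p = vec.ctypes.data_as(dbl_p)
out_p = out.ctypes.data_as(dbl_p)
# cfunc(vec_p, out_p)  # hypothetical JIT-compiled function taking two double*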

From PrincetonUniversity/PsyNeuLink, tests/llvm/matmul.py:
if not np.allclose(llvm_res, result):
    print("TEST FAILED LLVM results differ!")
    print(llvm_res)
    print(result)

start = timeit.default_timer()

with pnlvm.LLVMBuilderContext() as ctx:
    double_ptr_ty = ctx.float_ty.as_pointer()
    func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, double_ptr_ty, double_ptr_ty))

    # get builtin IR
    builtin = ctx.get_llvm_function('__pnl_builtin_vxm')

    # Create square vector matrix multiply
    function = ir.Function(ctx.module, func_ty, name="vxsqm")
    _x = ctx.int32_ty(x)
    _y = ctx.int32_ty(y)
    _v, _m, _o = function.args
    block = function.append_basic_block(name="entry")
    builder = ir.IRBuilder(block)
    builder.call(builtin, [_v, _m, _x, _y, _o])
    builder.ret_void()

# This triggers recompile if needed so it should be included in the measurement
binf2 = pnlvm.LLVMBinaryFunction.get('vxsqm')
stop = timeit.default_timer()
print("Build time elapsed {:f}".format(stop-start))

start = timeit.default_timer()
for _ in range(ITERATIONS):
    binf2(ct_vec, ct_mat, ct_res)

From toor-de-force/Ghidra-to-LLVM, src/run-all-tests.py:
def build_function(name, module):
    func_return = ir.VoidType()
    fnty = ir.FunctionType(func_return, [])
    ir_func = ir.Function(module, fnty, name)
    return ir_func
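
build_function only declares an empty void function; the caller still has to append a basic block and terminate it before the module is valid IR. A hypothetical usage sketch, with placeholder names:

import llvmlite.ir as ir

module = ir.Module(name="lifted")
func = build_function("stub", module)            # declare void stub()
builder = ir.IRBuilder(func.append_basic_block("entry"))
builder.ret_void()                               # give it a minimal body
print(module)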

From PrincetonUniversity/PsyNeuLink, tests/llvm/test_matmul_transposed.py:
def test_matmul_transposed_llvm_constant_dim(benchmark, mode):
    custom_name = None

    with pnlvm.LLVMBuilderContext() as ctx:
        custom_name = ctx.get_unique_name("vxsqm")
        double_ptr_ty = ctx.float_ty.as_pointer()
        func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, double_ptr_ty, double_ptr_ty))

        # get builtin IR
        builtin = ctx.get_llvm_function("__pnl_builtin_vxm_transposed")

        # Create square vector matrix multiply
        function = ir.Function(ctx.module, func_ty, name=custom_name)
        _x = ctx.int32_ty(x)
        _y = ctx.int32_ty(y)
        _v, _m, _o = function.args
        block = function.append_basic_block(name="entry")
        builder = ir.IRBuilder(block)
        builder.call(builtin, [_v, _m, _x, _y, _o])
        builder.ret_void()

    binf2 = pnlvm.LLVMBinaryFunction.get(custom_name)
    if mode == 'CPU':

From PrincetonUniversity/PsyNeuLink, tests/llvm/matmul.py:
start = timeit.default_timer()

with pnlvm.LLVMBuilderContext() as ctx:
    double_ptr_ty = ctx.float_ty.as_pointer()
    func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, double_ptr_ty, double_ptr_ty))

    # get builtin IR
    builtin = ctx.get_llvm_function('__pnl_builtin_vxm')

    # Create square vector matrix multiply
    function = ir.Function(ctx.module, func_ty, name="vxsqm")
    _x = ctx.int32_ty(x)
    _y = ctx.int32_ty(y)
    _v, _m, _o = function.args
    block = function.append_basic_block(name="entry")
    builder = ir.IRBuilder(block)
    builder.call(builtin, [_v, _m, _x, _y, _o])
    builder.ret_void()

# This triggers recompile if needed so it should be included in the measurement
binf2 = pnlvm.LLVMBinaryFunction.get('vxsqm')
stop = timeit.default_timer()
print("Build time elapsed {:f}".format(stop-start))

start = timeit.default_timer()
for _ in range(ITERATIONS):
    binf2(ct_vec, ct_mat, ct_res)
stop = timeit.default_timer()
print("LLVM-custom time elapsed {:f}".format(stop-start))

# Use all close to ignore rounding errors
if not np.allclose(llvm_res, result):
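
The script deliberately fetches the binary (pnlvm.LLVMBinaryFunction.get) inside the timed region, since that is what triggers compilation, and then times the call loop separately before checking the result against a reference. A sketch of the same measure-then-verify harness with a NumPy reference standing in for the compiled kernel; ITERATIONS and the shapes are placeholders.

import timeit
import numpy as np

ITERATIONS = 100
x, y = 64, 32
vector = np.random.rand(x)
matrix = np.random.rand(x, y)

start = timeit.default_timer()
for _ in range(ITERATIONS):
    result = np.dot(vector, matrix)
print("NumPy time elapsed {:f}".format(timeit.default_timer() - start))

# The compiled kernel is validated the same way the script does:
# assert np.allclose(llvm_res, result)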

From numba/numba, numba/datamodel/testing.py:
def test_as_arg(self):
        """
        - Is as_arg() and from_arg() implemented?
        - Are they the inverse of each other?
        """
        fnty = ir.FunctionType(ir.VoidType(), [])
        function = ir.Function(self.module, fnty, name="test_as_arg")
        builder = ir.IRBuilder()
        builder.position_at_end(function.append_basic_block())

        undef_value = ir.Constant(self.datamodel.get_value_type(), None)
        args = self.datamodel.as_argument(builder, undef_value)
        self.assertIsNot(args, NotImplemented, "as_argument returned "
                                               "NotImplementedError")

        if isinstance(args, (tuple, list)):
            def recur_tuplize(args, func=None):
                for arg in args:
                    if isinstance(arg, (tuple, list)):
                        yield tuple(recur_tuplize(arg, func=func))
                    else:
                        if func is None:
                            yield arg
                        else:
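
Two llvmlite idioms carry this test: an IRBuilder created without a block and positioned explicitly with position_at_end, and ir.Constant(value_type, None), which yields a zero/null constant of that type to use as a dummy argument. A standalone sketch of both, with placeholder types and names:

import llvmlite.ir as ir

module = ir.Module(name="as_arg_example")
i64 = ir.IntType(64)
fnty = ir.FunctionType(i64, [])
function = ir.Function(module, fnty, name="test_as_arg")

# Position the builder after the fact, as the test does
builder = ir.IRBuilder()
builder.position_at_end(function.append_basic_block())

# ir.Constant(ty, None) is a zero/null value of ty, handy as a dummy input
dummy = ir.Constant(i64, None)
builder.ret(dummy)
print(module)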