How to use the llvmlite.ir.Function class in llvmlite

To help you get started, we’ve selected a few llvmlite.ir.Function examples based on popular ways it is used in public projects.

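Before diving into the project excerpts, here is a minimal, self-contained sketch of the core pattern they all share: create an ir.FunctionType, attach an ir.Function to a module, append an entry block, and emit instructions through an ir.IRBuilder. The module and function names here are made up for illustration.

import llvmlite.ir as ir

# A module to own the function; the name is arbitrary.
module = ir.Module(name="example")

# Signature: i32 add(i32, i32)
i32 = ir.IntType(32)
func_ty = ir.FunctionType(i32, [i32, i32])
function = ir.Function(module, func_ty, name="add")

# Emit the body through an IRBuilder positioned in the entry block.
a, b = function.args
block = function.append_basic_block(name="entry")
builder = ir.IRBuilder(block)
builder.ret(builder.add(a, b))

print(module)  # textual LLVM IR, ready for llvmlite.binding.parse_assembly()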

github PrincetonUniversity / PsyNeuLink / tests / llvm / test_helpers.py
def test_helper_all_close(mode):

    with pnlvm.LLVMBuilderContext() as ctx:
        arr_ptr_ty = ir.ArrayType(ctx.float_ty, DIM_X).as_pointer()
        func_ty = ir.FunctionType(ir.VoidType(), [arr_ptr_ty, arr_ptr_ty,
                                                  ir.IntType(32).as_pointer()])

        custom_name = ctx.get_unique_name("all_close")
        function = ir.Function(ctx.module, func_ty, name=custom_name)
        in1, in2, out = function.args
        block = function.append_basic_block(name="entry")
        builder = ir.IRBuilder(block)

        all_close = pnlvm.helpers.all_close(builder, in1, in2)
        res = builder.select(all_close, out.type.pointee(1), out.type.pointee(0))
        builder.store(res, out)
        builder.ret_void()

    vec1 = copy.deepcopy(VECTOR)
    vec2 = copy.deepcopy(VECTOR)

    ref = np.allclose(vec1, vec2)
    bin_f = pnlvm.LLVMBinaryFunction.get(custom_name)
    if mode == 'CPU':
        ct_ty = pnlvm._convert_llvm_ir_to_ctype(arr_ptr_ty)

github PrincetonUniversity / PsyNeuLink / tests / llvm / test_builtins_matrix.py
def test_dot_transposed_llvm_constant_dim(benchmark, mode):
    custom_name = None

    with pnlvm.LLVMBuilderContext() as ctx:
        custom_name = ctx.get_unique_name("vxsqm")
        double_ptr_ty = ctx.float_ty.as_pointer()
        func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, double_ptr_ty, double_ptr_ty))

        # get builtin IR
        builtin = ctx.import_llvm_function("__pnl_builtin_vxm_transposed")

        # Create square vector matrix multiply
        function = ir.Function(ctx.module, func_ty, name=custom_name)
        _x = ctx.int32_ty(DIM_X)
        _y = ctx.int32_ty(DIM_Y)
        _v, _m, _o = function.args
        block = function.append_basic_block(name="entry")
        builder = ir.IRBuilder(block)
        builder.call(builtin, [_v, _m, _x, _y, _o])
        builder.ret_void()

    binf2 = pnlvm.LLVMBinaryFunction.get(custom_name)
    if mode == 'CPU':
        benchmark(binf2, ct_tvec, ct_u, ct_tvec_res)
    else:
        import pycuda
        cuda_vec = pycuda.driver.In(trans_vector)
        cuda_mat = pycuda.driver.In(u)
        cuda_res = pycuda.driver.Out(llvm_tvec_res)

github PrincetonUniversity / PsyNeuLink / tests / llvm / test_custom_func.py
        binf.c_func(ct_vec, ct_mat, x, y, ct_res)
    else:
        binf.cuda_wrap_call(vector, matrix, np.int32(x), np.int32(y), orig_res)

    custom_name = None

    with pnlvm.LLVMBuilderContext() as ctx:
        custom_name = ctx.get_unique_name("vxsqm")
        double_ptr_ty = ctx.convert_python_struct_to_llvm_ir(1.0).as_pointer()
        func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, double_ptr_ty, double_ptr_ty))

        # get builtin IR
        builtin = ctx.import_llvm_function("__pnl_builtin_vxm")

        # Create square vector matrix multiply
        function = ir.Function(ctx.module, func_ty, name=custom_name)
        _x = ctx.int32_ty(x)
        _v, _m, _o = function.args
        block = function.append_basic_block(name="entry")
        builder = ir.IRBuilder(block)
        builder.call(builtin, [_v, _m, _x, _x, _o])
        builder.ret_void()

    binf2 = pnlvm.LLVMBinaryFunction.get(custom_name)
    new_res = np.empty_like(llvm_res)

    if mode == 'CPU':
        ct_res = new_res.ctypes.data_as(ctypes.POINTER(ct_res_ty))

        binf2(ct_vec, ct_mat, ct_res)
    else:
        binf2.cuda_wrap_call(vector, matrix, new_res)

github PrincetonUniversity / PsyNeuLink / tests / llvm / matmul.py
if not np.allclose(llvm_res, result):
    print("TEST FAILED LLVM results differ!")
    print(llvm_res)
    print(result)

start = timeit.default_timer()

with pnlvm.LLVMBuilderContext() as ctx:
    double_ptr_ty = ctx.float_ty.as_pointer()
    func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, double_ptr_ty, double_ptr_ty))

    # get builtin IR
    builtin = ctx.get_llvm_function('__pnl_builtin_vxm')

    # Create square vector matrix multiply
    function = ir.Function(ctx.module, func_ty, name="vxsqm")
    _x = ctx.int32_ty(x)
    _y = ctx.int32_ty(y)
    _v, _m, _o = function.args
    block = function.append_basic_block(name="entry")
    builder = ir.IRBuilder(block)
    builder.call(builtin, [_v, _m, _x, _y, _o])
    builder.ret_void()

# This triggers recompile if needed so it should be included in the measurement
binf2 = pnlvm.LLVMBinaryFunction.get('vxsqm')
stop = timeit.default_timer()
print("Build time elapsed {:f}".format(stop-start))

start = timeit.default_timer()
for _ in range(ITERATIONS):
    binf2(ct_vec, ct_mat, ct_res)
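
The pnlvm wrappers above hide the actual compilation step. For plain llvmlite code, the same build-versus-run split can be timed with llvmlite.binding and MCJIT; the sketch below is a rough equivalent and assumes the module and add function from the first example (it is not part of the PsyNeuLink sources).

import ctypes
import timeit
import llvmlite.binding as llvm

llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()

start = timeit.default_timer()
target_machine = llvm.Target.from_default_triple().create_target_machine()
engine = llvm.create_mcjit_compiler(llvm.parse_assembly(""), target_machine)
mod = llvm.parse_assembly(str(module))  # 'module' is the ir.Module built earlier
mod.verify()
engine.add_module(mod)
engine.finalize_object()  # machine code is emitted here
print("Build time elapsed {:f}".format(timeit.default_timer() - start))

# Call the JIT-compiled i32 add(i32, i32) through ctypes.
cfunc = ctypes.CFUNCTYPE(ctypes.c_int32, ctypes.c_int32, ctypes.c_int32)(
    engine.get_function_address("add"))
assert cfunc(2, 3) == 5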

github toor-de-force / Ghidra-to-LLVM / src / run-all-tests.py
def build_function(name, module):
    # Declare a void, zero-argument function in the given module; the body
    # (basic blocks) is added by the caller later.
    func_return = ir.VoidType()
    fnty = ir.FunctionType(func_return, [])
    ir_func = ir.Function(module, fnty, name)
    return ir_func
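
A Function created this way has no basic blocks, so it prints as a bare declaration until the caller appends an entry block and a terminator. A hypothetical usage sketch (module and function names are made up):

import llvmlite.ir as ir

module = ir.Module(name="ghidra_out")
fn = build_function("stub", module)
print(module)  # contains: declare void @"stub"()

# Give the stub a body by appending a block and terminating it.
builder = ir.IRBuilder(fn.append_basic_block("entry"))
builder.ret_void()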

github numba / numba / numba / dictobject.py
def codegen(context, builder, sig, args):
        vtablety = ir.LiteralStructType([
            ll_voidptr_type,  # equal
            ll_voidptr_type,  # key incref
            ll_voidptr_type,  # key decref
            ll_voidptr_type,  # val incref
            ll_voidptr_type,  # val decref
        ])
        setmethod_fnty = ir.FunctionType(
            ir.VoidType(),
            [ll_dict_type, vtablety.as_pointer()]
        )
        setmethod_fn = ir.Function(
            builder.module,
            setmethod_fnty,
            name='numba_dict_set_method_table',
        )
        dp = args[0]
        vtable = cgutils.alloca_once(builder, vtablety, zfill=True)

        # install key incref/decref
        key_equal_ptr = cgutils.gep_inbounds(builder, vtable, 0, 0)
        key_incref_ptr = cgutils.gep_inbounds(builder, vtable, 0, 1)
        key_decref_ptr = cgutils.gep_inbounds(builder, vtable, 0, 2)
        val_incref_ptr = cgutils.gep_inbounds(builder, vtable, 0, 3)
        val_decref_ptr = cgutils.gep_inbounds(builder, vtable, 0, 4)

        dm_key = context.data_model_manager[keyty.instance_type]
        if dm_key.contains_nrt_meminfo():
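
The cgutils helpers used here (alloca_once, gep_inbounds) are thin wrappers over llvmlite primitives. The same store-function-pointers-into-a-vtable pattern in plain llvmlite looks roughly like the sketch below; the struct layout and function names are illustrative, not Numba's.

import llvmlite.ir as ir

i8ptr = ir.IntType(8).as_pointer()  # stands in for ll_voidptr_type
i32 = ir.IntType(32)
vtablety = ir.LiteralStructType([i8ptr, i8ptr])  # e.g. incref / decref slots

module = ir.Module(name="vtable_demo")
incref = ir.Function(module, ir.FunctionType(ir.VoidType(), [i8ptr]), name="incref")
fn = ir.Function(module, ir.FunctionType(ir.VoidType(), []), name="fill_vtable")
builder = ir.IRBuilder(fn.append_basic_block("entry"))

vtable = builder.alloca(vtablety)                             # cf. alloca_once
slot0 = builder.gep(vtable, [i32(0), i32(0)], inbounds=True)  # cf. gep_inbounds
builder.store(builder.bitcast(incref, i8ptr), slot0)          # install the pointer
builder.ret_void()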

github AndreaOrru / Lucy / compiler / lucyc.py
def new_func(self, typ, ide):
        self.func = ir.Function(self.module, typ, name=ide)
        self.symbols.bind(ide, self.func)

github hassanalinali / Lesma / src / lesma / compiler / builtins.py
def dynamic_array_append(self, dyn_array_ptr, array_type):
    # START
    dyn_array_append_type = ir.FunctionType(type_map[VOID], [dyn_array_ptr, array_type])
    dyn_array_append = ir.Function(self.module, dyn_array_append_type, '{}.array.append'.format(str(array_type)))
    dyn_array_append.args[0].name = 'self'
    dyn_array_append_entry = dyn_array_append.append_basic_block('entry')
    builder = ir.IRBuilder(dyn_array_append_entry)
    self.builder = builder
    dyn_array_append_exit = dyn_array_append.append_basic_block('exit')
    builder.position_at_end(dyn_array_append_entry)
    array_ptr = builder.alloca(dyn_array_ptr)
    builder.store(dyn_array_append.args[0], array_ptr)
    value_ptr = builder.alloca(array_type)
    builder.store(dyn_array_append.args[1], value_ptr)

    # BODY
    builder.call(self.module.get_global('{}.array.double_capacity_if_full'.format(str(array_type))), [builder.load(array_ptr)])

    size_ptr = builder.gep(builder.load(array_ptr), [zero_32, zero_32], inbounds=True)
    size_val = builder.load(size_ptr)

github hassanalinali / Lesma / src / lesma / compiler / code_generator.py
        ir.Function(self.module, free_ty, 'free')

        exit_ty = ir.FunctionType(type_map[VOID], [type_map[INT32]])
        ir.Function(self.module, exit_ty, 'exit')

        putchar_ty = ir.FunctionType(type_map[INT], [type_map[INT]])
        ir.Function(self.module, putchar_ty, 'putchar')

        printf_ty = ir.FunctionType(type_map[INT32], [type_map[INT8].as_pointer()], var_arg=True)
        ir.Function(self.module, printf_ty, 'printf')

        scanf_ty = ir.FunctionType(type_map[INT], [type_map[INT8].as_pointer()], var_arg=True)
        ir.Function(self.module, scanf_ty, 'scanf')

        getchar_ty = ir.FunctionType(ir.IntType(8), [])
        ir.Function(self.module, getchar_ty, 'getchar')

        puts_ty = ir.FunctionType(type_map[INT], [type_map[INT].as_pointer()])
        ir.Function(self.module, puts_ty, 'puts')

        define_builtins(self)
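
Declarations like these have a module and a type but never get a basic block, so they stay external and are resolved against libc at link or JIT time. A hedged sketch of calling such a declaration, assuming a printf declared as above plus a hand-rolled global format string (names are illustrative):

import llvmlite.ir as ir

i8, i32 = ir.IntType(8), ir.IntType(32)
module = ir.Module(name="hello")

# declare i32 @printf(i8*, ...)
printf_ty = ir.FunctionType(i32, [i8.as_pointer()], var_arg=True)
printf = ir.Function(module, printf_ty, name="printf")

# Global constant holding the NUL-terminated format string.
fmt = bytearray(b"hello %d\n\0")
fmt_ty = ir.ArrayType(i8, len(fmt))
gfmt = ir.GlobalVariable(module, fmt_ty, name="fmt")
gfmt.global_constant = True
gfmt.initializer = ir.Constant(fmt_ty, fmt)

main = ir.Function(module, ir.FunctionType(i32, []), name="main")
builder = ir.IRBuilder(main.append_basic_block("entry"))
fmt_ptr = builder.bitcast(gfmt, i8.as_pointer())
builder.call(printf, [fmt_ptr, i32(42)])
builder.ret(i32(0))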

github paivett / gone / gone / llvmgen.py
def generate_code(self, ir_function):
        # Given a sequence of SSA intermediate code tuples, generate LLVM
        # instructions using the current builder (self.builder).  Each
        # opcode tuple (opcode, args) is dispatched to a method of the
        # form self.emit_opcode(args)
        # print(ir_function)
        self.function = Function(self.module,
                                 FunctionType(
                                     LLVM_TYPE_MAPPING[ir_function.return_type],
                                     [LLVM_TYPE_MAPPING[ptype] for _, ptype in ir_function.parameters]
                                 ),
                                 name=ir_function.name)

        self.block = self.function.append_basic_block('entry')
        self.builder = IRBuilder(self.block)

        # Save the function as a global to be referenced later on another
        # function
        self.globals[ir_function.name] = self.function

        # All local variables are stored here
        self.locals = { }
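
Keeping the ir.Function object around (here in self.globals) is what lets later call opcodes reference it directly as the callee. A minimal sketch of that cross-function pattern outside the gone compiler, with made-up names:

import llvmlite.ir as ir

i32 = ir.IntType(32)
module = ir.Module(name="calls")

# Callee: i32 square(i32)
square = ir.Function(module, ir.FunctionType(i32, [i32]), name="square")
builder = ir.IRBuilder(square.append_basic_block("entry"))
builder.ret(builder.mul(square.args[0], square.args[0]))

# Caller reuses the Function object, the role self.globals plays above.
main = ir.Function(module, ir.FunctionType(i32, []), name="main")
builder = ir.IRBuilder(main.append_basic_block("entry"))
builder.ret(builder.call(square, [i32(5)]))

print(module)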