How to use the wgpu.base module in wgpu

To help you get started, we've selected a few wgpu.base examples, based on popular ways it is used in public projects.
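
Most of the snippets below come from the Rust backend (wgpu/backends/rs.py), which implements the API declared in wgpu.base by subclassing its classes and forwarding each call to wgpu-native through cffi. The following is only a minimal, hypothetical sketch of that pattern: the _StubLib class and the wgpu_buffer_destroy call stand in for the backend's real FFI handle and are not actual wgpu-py code.

from wgpu import base


class _StubLib:
    """Stand-in for the cffi handle to wgpu-native that the real backend keeps in _lib."""

    def wgpu_buffer_destroy(self, handle):
        pass  # the real backend frees the native object here


_lib = _StubLib()


class GPUBuffer(base.GPUBuffer):
    def _destroy(self):
        # Swap the handle out first so a repeated call is a harmless no-op,
        # then release the native object through the FFI.
        if self._internal is not None:
            self._internal, internal = None, self._internal
            _lib.wgpu_buffer_destroy(internal)
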


From almarklein/wgpu-py, wgpu/backends/rs.py:

    # wgpu.help('renderpassencoderendpass', dev=True)
    def end_pass(self):
        _lib.wgpu_render_pass_end_pass(self._internal)


class GPURenderBundleEncoder(base.GPURenderBundleEncoder):
    pass

    # Not yet implemented in wgpu-native
    # def finish(self, *, label=""):
    #     ...


class GPUQueue(base.GPUQueue):
    # wgpu.help('queuesubmit', dev=True)
    def submit(self, command_buffers):
        command_buffer_ids = [cb._internal for cb in command_buffers]
        c_command_buffers = ffi.new("WGPUCommandBufferId []", command_buffer_ids)
        _lib.wgpu_queue_submit(
            self._internal, c_command_buffers, len(command_buffer_ids)
        )

    # Seems not yet implemented in wgpu-native
    # def copy_image_bitmap_to_texture(self, source, destination, copy_size):
    #     ...

    # wgpu.help('Buffer', 'Size64', 'queuewritebuffer', dev=True)
    def write_buffer(self, buffer, buffer_offset, data, data_offset=0, size=None):

        # We support anything that memoryview supports, i.e. anything
        # that supports the buffer protocol.
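
From the user's side, write_buffer accepts any object that memoryview can wrap. A small, hypothetical usage sketch follows; device and buffer are assumed to have been created earlier, and the default_queue attribute name reflects the wgpu-py API of this era and may differ in later versions.

import numpy as np

# Any buffer-protocol object works: bytes, bytearray, array.array, numpy arrays, ...
data = np.arange(16, dtype=np.float32)
device.default_queue.write_buffer(buffer, 0, data)  # upload the whole array at buffer offset 0
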
From almarklein/wgpu-py, wgpu/backends/rs.py:
                # Used for storage texture bindings.
                storage_texture_format=entry.get("storage_texture_format", 0),
            )
            c_entries_list.append(c_entry)

        c_label = ffi.new("char []", label.encode())
        struct = new_struct_p(
            "WGPUBindGroupLayoutDescriptor *",
            label=c_label,
            entries=ffi.new("WGPUBindGroupLayoutEntry []", c_entries_list),
            entries_length=len(c_entries_list),
        )

        id = _lib.wgpu_device_create_bind_group_layout(self._internal, struct)

        return base.GPUBindGroupLayout(label, id, self, entries)
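
From user code, the corresponding call would look roughly like the sketch below. The entry dict keys (binding, visibility, type) and the ShaderStage/BindingType names follow the wgpu-py flags and enums of this era and should be treated as assumptions; device is assumed to exist.

import wgpu

bind_group_layout = device.create_bind_group_layout(
    entries=[
        {
            "binding": 0,
            "visibility": wgpu.ShaderStage.COMPUTE,
            "type": wgpu.BindingType.storage_buffer,
        },
    ]
)
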
From almarklein/wgpu-py, wgpu/__init__.py:
def _register_backend(func, func_async):
    if not (callable(func) and func.__name__ == "request_adapter"):
        raise RuntimeError(
            "WGPU backend must be registered with function called request_adapter."
        )
    if not (callable(func_async) and func_async.__name__ == "request_adapter_async"):
        raise RuntimeError(
            "WGPU backend must be registered with function called request_adapter_async."
        )
    if globals()["request_adapter"] is not base.request_adapter:
        raise RuntimeError("WGPU backend can only be set once.")
    globals()["request_adapter"] = func
    globals()["request_adapter_async"] = func_async
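
A backend module would typically call _register_backend once, at import time. The sketch below is hypothetical, but note that the two functions must literally be named request_adapter and request_adapter_async to pass the checks above.

from wgpu import _register_backend


def request_adapter(**parameters):
    """Hypothetical synchronous adapter request backed by some native library."""
    raise NotImplementedError()


async def request_adapter_async(**parameters):
    """Hypothetical async variant; must carry exactly this name to pass the check."""
    raise NotImplementedError()


# Typically executed at the bottom of the backend module, so that importing the
# backend (e.g. `import wgpu.backends.rs`) swaps in its implementations.
_register_backend(request_adapter, request_adapter_async)
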
From almarklein/wgpu-py, wgpu/backends/rs.py:
            self._internal, c_source, c_destination, c_copy_size,
        )

    # wgpu.help('CommandBufferDescriptor', 'commandencoderfinish', dev=True)
    def finish(self, *, label=""):
        struct = new_struct_p("WGPUCommandBufferDescriptor *", todo=0)
        id = _lib.wgpu_command_encoder_finish(self._internal, struct)
        return base.GPUCommandBuffer(label, id, self)

    # todo: these do not exist yet for command_encoder in wgpu-native
    # def push_debug_group(self, group_label):
    # def pop_debug_group(self):
    # def insert_debug_marker(self, marker_label):


class GPUProgrammablePassEncoder(base.GPUProgrammablePassEncoder):
    # wgpu.help('BindGroup', 'Index32', 'Size32', 'Size64', 'programmablepassencodersetbindgroup', dev=True)
    def set_bind_group(
        self,
        index,
        bind_group,
        dynamic_offsets_data,
        dynamic_offsets_data_start,
        dynamic_offsets_data_length,
    ):
        offsets = list(dynamic_offsets_data)
        c_offsets = ffi.new("WGPUDynamicOffset []", offsets)
        bind_group_id = bind_group._internal
        if isinstance(self, GPUComputePassEncoder):
            _lib.wgpu_compute_pass_set_bind_group(
                self._internal, index, bind_group_id, c_offsets, len(offsets)
            )
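
Only dynamic_offsets_data is referenced in the snippet above; the start and length arguments come from the base API but are not used here. A hypothetical user-side call inside a compute pass (compute_pass and bind_group are assumed to exist) would look like:

compute_pass.set_bind_group(
    0,           # bind group index
    bind_group,  # a GPUBindGroup created with device.create_bind_group(...)
    [],          # dynamic_offsets_data: no dynamic offsets
    0,           # dynamic_offsets_data_start (not referenced in the snippet above)
    0,           # dynamic_offsets_data_length (not referenced in the snippet above)
)
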
From almarklein/wgpu-py, wgpu/backends/rs.py:
    def create_bind_group(
        self,
        *,
        label="",
        layout: "GPUBindGroupLayout",
        entries: "GPUBindGroupEntry-list",
    ):

        c_entries_list = []
        for entry in entries:
            _check_struct("BindGroupEntry", entry)
            # The resource can be a sampler, texture view, or buffer descriptor
            resource = entry["resource"]
            if isinstance(resource, base.GPUSampler):
                c_resource_kwargs = {
                    "tag": 1,  # WGPUBindingResource_Tag.WGPUBindingResource_Sampler
                    "sampler": new_struct(
                        "WGPUBindingResource_WGPUSampler_Body", _0=resource._internal
                    ),
                }
            elif isinstance(resource, base.GPUTextureView):
                c_resource_kwargs = {
                    "tag": 2,  # WGPUBindingResource_Tag.WGPUBindingResource_TextureView
                    "texture_view": new_struct(
                        "WGPUBindingResource_WGPUTextureView_Body",
                        _0=resource._internal,
                    ),
                }
            elif isinstance(resource, dict):  # Buffer binding
                _check_struct("BufferBinding", resource)
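
The snippet branches on the resource type: a GPUSampler, a GPUTextureView, or a plain dict describing a buffer binding. From the user's side that maps to entries roughly like the hypothetical sketch below; device, bind_group_layout, buffer, texture_view and sampler are assumed to exist, and the dict keys are assumptions based on the wgpu-py API of this era.

bind_group = device.create_bind_group(
    layout=bind_group_layout,
    entries=[
        # A buffer binding is described by a plain dict (the BufferBinding struct)
        {"binding": 0, "resource": {"buffer": buffer, "offset": 0, "size": buffer.size}},
        # A texture view binds directly
        {"binding": 1, "resource": texture_view},
        # As does a sampler
        {"binding": 2, "resource": sampler},
    ],
)
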
From almarklein/wgpu-py, wgpu/backends/rs.py:
        self._destroy()  # no-cover

    def _destroy(self):
        if self._internal is not None:
            self._internal, internal = None, self._internal
            _lib.wgpu_texture_destroy(internal)


class GPUBindGroup(base.GPUBindGroup):
    def _destroy(self):
        if self._internal is not None:
            self._internal, internal = None, self._internal
            _lib.wgpu_bind_group_layout_destroy(internal)


class GPUPipelineLayout(base.GPUPipelineLayout):
    def _destroy(self):
        if self._internal is not None:
            self._internal, internal = None, self._internal
            _lib.wgpu_pipeline_layout_destroy(internal)


class GPUShaderModule(base.GPUShaderModule):
    # wgpu.help('shadermodulecompilationinfo', dev=True)
    def compilation_info(self):
        return super().compilation_info()

    def _destroy(self):
        if self._internal is not None:
            self._internal, internal = None, self._internal
            _lib.wgpu_shader_module_destroy(internal)
From almarklein/wgpu-py, docs/conf.py:
# Make flags and enum appear better in docs
wgpu.enums._use_sphinx_repr = True
wgpu.flags._use_sphinx_repr = True

# Simplify the signature of the two root functions
wgpu.request_adapter.__doc__ = (
    "request_adapter(**parameters)\n\n    " + wgpu.request_adapter.__doc__.lstrip()
)
wgpu.request_adapter_async.__doc__ = (
    "request_adapter_async(**parameters)\n\n    "
    + wgpu.request_adapter_async.__doc__.lstrip()
)

# Also tweak docstrings of classes and their methods
for cls in wgpu.base.__dict__.values():
    if not isinstance(cls, type):
        continue
    # Change the class docstring to include a link to the base class,
    # and so that the class' signature is not shown
    base_info = ""
    base_cls = cls.mro()[1]
    if base_cls is not object:
        base_info = f"    *Subclass of* :class:`.{base_cls.__name__}`\n\n"
    cls.__doc__ = cls.__name__ + "()\n\n" + base_info + "    " + cls.__doc__.lstrip()
    # Change docstring of methods that don't have positional arguments
    for method in cls.__dict__.values():
        if not (callable(method) and hasattr(method, "__code__")):
            continue
        if method.__code__.co_argcount == 1 and method.__code__.co_kwonlyargcount > 0:
            sig = method.__name__ + "(**parameters)"
            method.__doc__ = sig + "\n\n        " + method.__doc__.lstrip()
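
As a small, self-contained illustration of what that rewrite does to a keyword-only method, here is a toy function (not wgpu code) put through the same transformation:

def create_thing(self, *, label="", size=0):
    """Create a Thing object."""

sig = create_thing.__name__ + "(**parameters)"
create_thing.__doc__ = sig + "\n\n        " + create_thing.__doc__.lstrip()
print(create_thing.__doc__.splitlines()[0])  # -> create_thing(**parameters)
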
From almarklein/wgpu-py, wgpu/backends/rs.py:
class GPUComputePipeline(base.GPUComputePipeline):
    def _destroy(self):
        if self._internal is not None:
            self._internal, internal = None, self._internal
            _lib.wgpu_compute_pipeline_destroy(internal)


class GPURenderPipeline(base.GPURenderPipeline):
    def _destroy(self):
        if self._internal is not None:
            self._internal, internal = None, self._internal
            _lib.wgpu_render_pipeline_destroy(internal)


class GPUCommandEncoder(base.GPUCommandEncoder):
    # wgpu.help('ComputePassDescriptor', 'commandencoderbegincomputepass', dev=True)
    def begin_compute_pass(self, *, label=""):
        struct = new_struct_p("WGPUComputePassDescriptor *", todo=0)
        raw_pass = _lib.wgpu_command_encoder_begin_compute_pass(self._internal, struct)
        return GPUComputePassEncoder(label, raw_pass, self)

    # wgpu.help('RenderPassDescriptor', 'commandencoderbeginrenderpass', dev=True)
    def begin_render_pass(
        self,
        *,
        label="",
        color_attachments: "GPURenderPassColorAttachmentDescriptor-list",
        depth_stencil_attachment: "GPURenderPassDepthStencilAttachmentDescriptor" = None,
        occlusion_query_set: "GPUQuerySet" = None,
    ):
        # Note that occlusion_query_set is ignored because wgpu-native does not have it.
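
Tying these pieces together, recording and submitting work through this backend looks roughly like the hypothetical sketch below; device is assumed to exist, and create_command_encoder, end_pass and default_queue reflect the base API of this era.

command_encoder = device.create_command_encoder()

compute_pass = command_encoder.begin_compute_pass()
# ... set the pipeline and bind groups, dispatch work ...
compute_pass.end_pass()

command_buffer = command_encoder.finish()
device.default_queue.submit([command_buffer])
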
From almarklein/wgpu-py, wgpu/backends/rs.py:
    def _request_device(self, label, extensions, limits, trace_path):

        c_trace_path = ffi.NULL
        if trace_path:  # no-cover
            c_trace_path = ffi.new("char []", trace_path.encode())

        # Handle default limits
        _check_struct("Limits", limits)
        limits2 = base.default_limits.copy()
        limits2.update(limits or {})

        c_extensions = new_struct(
            "WGPUExtensions",
            anisotropic_filtering="anisotropic_filtering" in extensions,
        )
        c_limits = new_struct("WGPULimits", max_bind_groups=limits2["max_bind_groups"])
        struct = new_struct_p(
            "WGPUDeviceDescriptor *", extensions=c_extensions, limits=c_limits
        )
        device_id = _lib.wgpu_adapter_request_device(self._id, struct, c_trace_path)

        # Get the actual limits reported by the device
        c_limits = new_struct_p("WGPULimits *")
        _lib.wgpu_device_get_limits(device_id, c_limits)
        limits3 = {key: getattr(c_limits, key) for key in dir(c_limits)}
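
At the top level, all of this is reached by importing the backend (which swaps in its request_adapter via the _register_backend function shown earlier) and then requesting an adapter and a device. This is a hypothetical sketch; the exact keyword arguments of request_adapter vary between wgpu-py versions.

import wgpu
import wgpu.backends.rs  # noqa: F401  (importing the backend registers it)

adapter = wgpu.request_adapter(power_preference="high-performance")
device = adapter.request_device(extensions=[], limits={})
print(device.limits)  # the limits actually reported by the device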