How to use the wgpu.TextureDimension enum in wgpu

To help you get started, we’ve selected a few wgpu.TextureDimension examples, based on popular ways it is used in public projects.
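
For context, wgpu.TextureDimension is an enum with the members d1, d2 and d3 (the WebGPU strings "1d", "2d" and "3d"). It is passed as the dimension argument of device.create_texture() and texture.create_view(). Below is a minimal sketch of that pattern, using the same wgpu-py API as the snippets on this page; the size, format and usage flags are arbitrary choices for illustration.

import wgpu
import wgpu.utils

# Get a default GPU device, as the test snippets below do
device = wgpu.utils.get_default_device()

# Create a 2D texture; dimension takes a wgpu.TextureDimension value
texture = device.create_texture(
    size=(64, 64, 1),
    dimension=wgpu.TextureDimension.d2,  # d1 and d3 work the same way
    format=wgpu.TextureFormat.rgba8unorm,
    usage=wgpu.TextureUsage.COPY_DST | wgpu.TextureUsage.SAMPLED,
)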


From almarklein/wgpu-py, tests/test_rs_basics.py (writing CPU data to a 1D texture):
def test_write_texture1():
    device = wgpu.utils.get_default_device()

    nx, ny, nz = 100, 1, 1
    data1 = memoryview(np.random.random(size=100).astype(np.float32))
    bpp = data1.nbytes // (nx * ny * nz)
    texture_format = wgpu.TextureFormat.r32float
    texture_dim = wgpu.TextureDimension.d1

    # Create buffers and textures
    tex3 = device.create_texture(
        size=(nx, ny, nz),
        dimension=texture_dim,
        format=texture_format,
        usage=wgpu.TextureUsage.COPY_SRC | wgpu.TextureUsage.COPY_DST,
    )
    buf4 = device.create_buffer(
        size=data1.nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.MAP_READ
    )

    # Upload from CPU to texture
    command_encoder = device.create_command_encoder()
    device.default_queue.write_texture(
        {"texture": tex3},
From almarklein/wgpu-py, tests/test_rs_render_tex.py (rendering with a 2D texture):
def render_textured_square(fragment_shader, texture_format, texture_size, texture_data):
    """ Render, and test the result. The resulting image must be a
    gradient on R and B, zeros on G and ones on A.
    """
    nx, ny, nz = texture_size

    device = get_default_device()

    if can_use_vulkan_sdk:
        pyshader.dev.validate(vertex_shader)
        pyshader.dev.validate(fragment_shader)

    # Create texture
    texture = device.create_texture(
        size=(nx, ny, nz),
        dimension=wgpu.TextureDimension.d2,
        format=texture_format,
        usage=wgpu.TextureUsage.SAMPLED | wgpu.TextureUsage.COPY_DST,
    )
    upload_to_texture(device, texture, texture_data, nx, ny, nz)

    # texture_view = texture.create_view()
    # or:
    texture_view = texture.create_view(
        format=texture_format, dimension=wgpu.TextureDimension.d2,
    )
    # But not like these ...
    with raises(ValueError):
        texture_view = texture.create_view(dimension=wgpu.TextureDimension.d2,)
    with raises(ValueError):
        texture_view = texture.create_view(mip_level_count=1,)
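
The two raises(ValueError) blocks show that, in this version of wgpu-py, create_view() can be called with no arguments (a default view of the whole texture) or with format and dimension given together, but not with only a partial set of view parameters such as dimension alone or mip_level_count alone. A fully specified view would look roughly like the sketch below; the extra parameters and their values are assumptions for illustration, not part of the test above.

texture_view = texture.create_view(
    format=texture_format,
    dimension=wgpu.TextureDimension.d2,
    aspect=wgpu.TextureAspect.all,
    base_mip_level=0,
    mip_level_count=1,
    base_array_layer=0,
    array_layer_count=1,
)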
From almarklein/wgpu-py, tests/test_rs_compute_tex.py (compute shader reading and writing a 1D texture):
        color = tex1.read(index.x)
        color = ivec4(color.x + index.x, color.y + 1, color.z * 2, color.a)
        tex2.write(index.x, color)

    # Generate data
    nx, ny, nz, nc = 7, 1, 1, 4
    data1 = (ctypes.c_uint8 * nc * nx)()
    for x in range(nx):
        for c in range(nc):
            data1[x][c] = random.randint(0, 20)

    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.rgba8uint,
        wgpu.TextureDimension.d1,
        (nx, ny, nz, nc),
        data1,
    )
From almarklein/wgpu-py, tests/test_rs_render.py (creating a 2D depth-stencil texture and state):
def cb(renderpass):
        renderpass.set_stencil_reference(42)

    # Bindings and layout
    bind_group_layout = device.create_bind_group_layout(entries=[])  # zero bindings
    bind_group = device.create_bind_group(layout=bind_group_layout, entries=[])
    pipeline_layout = device.create_pipeline_layout(
        bind_group_layouts=[bind_group_layout]
    )

    # Create depth-stencil texture
    depth_stencil_texture = device.create_texture(
        size=(64, 64, 1),  # when rendering to texture
        # size=(640, 480, 1),  # when rendering to screen
        dimension=wgpu.TextureDimension.d2,
        format=wgpu.TextureFormat.depth24plus_stencil8,
        usage=wgpu.TextureUsage.OUTPUT_ATTACHMENT,
    )

    depth_stencil_state = dict(
        format=wgpu.TextureFormat.depth24plus_stencil8,
        depth_write_enabled=True,
        depth_compare=wgpu.CompareFunction.less_equal,
        stencil_front={
            "compare": wgpu.CompareFunction.equal,
            "fail_op": wgpu.StencilOperation.keep,
            "depth_fail_op": wgpu.StencilOperation.keep,
            "pass_op": wgpu.StencilOperation.keep,
        },
        stencil_back={
            "compare": wgpu.CompareFunction.equal,
From almarklein/wgpu-py, tests/renderutils.py (rendering to an offscreen 2D texture):
# https://github.com/gfx-rs/wgpu-rs/blob/master/examples/capture/main.rs

    vbos = vbos or []
    vbo_views = vbo_views or []

    # Select texture format. The srgb norm maps to the srgb colorspace which
    # appears to be the default for render pipelines https://en.wikipedia.org/wiki/SRGB
    texture_format = wgpu.TextureFormat.rgba8unorm  # rgba8unorm or bgra8unorm_srgb

    # Create texture to render to
    nx, ny, bpp = size[0], size[1], 4
    nbytes = nx * ny * bpp
    texture = device.create_texture(
        size=(nx, ny, 1),
        dimension=wgpu.TextureDimension.d2,
        format=texture_format,
        usage=wgpu.TextureUsage.OUTPUT_ATTACHMENT | wgpu.TextureUsage.COPY_SRC,
    )
    current_texture_view = texture.create_view()

    # Also a buffer to read the data to CPU
    buffer = device.create_buffer(
        size=nbytes, usage=wgpu.BufferUsage.MAP_READ | wgpu.BufferUsage.COPY_DST
    )

    vshader = device.create_shader_module(code=vertex_shader)
    fshader = device.create_shader_module(code=fragment_shader)

    render_pipeline = device.create_render_pipeline(
        layout=pipeline_layout,
        vertex_stage={"module": vshader, "entry_point": "main"},
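
The excerpt above creates a render target with COPY_SRC usage and a buffer with MAP_READ usage "to read the data to CPU", but it cuts off before the actual readback. The usual readback pattern in this API is sketched below; it reuses the names from the excerpt, assumes the render pass has already been encoded on a command encoder (command_encoder), and uses read_data() to match the write_data() call seen in the test_rs_basics.py excerpt further down. It is not the verbatim continuation of renderutils.py.

# Copy the rendered texture into the mappable buffer
# (note: WebGPU requires bytes_per_row to be a multiple of 256)
command_encoder.copy_texture_to_buffer(
    {"texture": texture, "mip_level": 0, "origin": (0, 0, 0)},
    {"buffer": buffer, "offset": 0, "bytes_per_row": bpp * nx, "rows_per_image": 0},
    (nx, ny, 1),
)
device.default_queue.submit([command_encoder.finish()])

# Read the pixel data back to the CPU as a memoryview of nbytes bytes
data = buffer.read_data()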
From almarklein/wgpu-py, tests/test_rs_render_tex.py (2D texture views, sampling, and component types):
        pyshader.dev.validate(vertex_shader)
        pyshader.dev.validate(fragment_shader)

    # Create texture
    texture = device.create_texture(
        size=(nx, ny, nz),
        dimension=wgpu.TextureDimension.d2,
        format=texture_format,
        usage=wgpu.TextureUsage.SAMPLED | wgpu.TextureUsage.COPY_DST,
    )
    upload_to_texture(device, texture, texture_data, nx, ny, nz)

    # texture_view = texture.create_view()
    # or:
    texture_view = texture.create_view(
        format=texture_format, dimension=wgpu.TextureDimension.d2,
    )
    # But not like these ...
    with raises(ValueError):
        texture_view = texture.create_view(dimension=wgpu.TextureDimension.d2,)
    with raises(ValueError):
        texture_view = texture.create_view(mip_level_count=1,)

    sampler = device.create_sampler(mag_filter="linear", min_filter="linear")

    # Determine texture component type from the format
    if texture_format.endswith(("norm", "float")):
        texture_component_type = wgpu.TextureComponentType.float
    elif "uint" in texture_format:
        texture_component_type = wgpu.TextureComponentType.uint
    else:
        texture_component_type = wgpu.TextureComponentType.sint
From almarklein/wgpu-py, tests/test_rs_compute_tex.py (compute shader on a 3D texture):
        tex2.write(index.xyz, color)

    # Generate data
    nx, ny, nz, nc = 7, 8, 6, 1
    data1 = (ctypes.c_float * nc * nx * ny * nz)()
    for z in range(nz):
        for y in range(ny):
            for x in range(nx):
                for c in range(nc):
                    data1[z][y][x][c] = random.randint(0, 20)

    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.r32float,
        wgpu.TextureDimension.d3,
        (nx, ny, nz, nc),
        data1,
    )
From almarklein/wgpu-py, tests/test_rs_basics.py (inspecting a 1D texture and copying buffer data into it):
        dimension=texture_dim,
        format=texture_format,
        usage=wgpu.TextureUsage.COPY_SRC | wgpu.TextureUsage.COPY_DST,
    )
    buf4 = device.create_buffer(
        size=nbytes, usage=wgpu.BufferUsage.COPY_SRC | wgpu.BufferUsage.COPY_DST
    )
    buf5 = device.create_buffer(
        size=nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.MAP_READ
    )

    # Check texture stats
    assert tex2.texture_size == (nx, ny, nz)
    assert tex2.mip_level_count == 1
    assert tex2.sample_count == 1
    assert tex2.dimension == wgpu.TextureDimension.d1
    assert tex2.format == texture_format
    assert tex2.texture_usage == wgpu.TextureUsage.COPY_SRC | wgpu.TextureUsage.COPY_DST
    assert tex2.create_view().texture is tex2

    # Upload from CPU to buffer
    # assert buf1.state == "unmapped"
    # mapped_data = buf1.map(wgpu.MapMode.WRITE)
    # assert buf1.state == "mapped"
    # mapped_data.cast("f")[:] = data1
    # buf1.unmap()
    # assert buf1.state == "unmapped"
    buf1.write_data(data1)

    # Copy from buffer to texture
    command_encoder = device.create_command_encoder()
    command_encoder.copy_buffer_to_texture(
From almarklein/wgpu-py, examples/cube_glfw.py (creating and uploading a 2D texture in the cube example):
# Create index buffer, and upload data
index_buffer = device.create_buffer_with_data(
    data=index_data, usage=wgpu.BufferUsage.INDEX
)

# Create uniform buffer - data is uploaded each frame
uniform_buffer = device.create_buffer(
    size=uniform_data.nbytes, usage=wgpu.BufferUsage.UNIFORM | wgpu.BufferUsage.COPY_DST
)


# Create texture, and upload data
texture = device.create_texture(
    size=texture_size,
    usage=wgpu.TextureUsage.COPY_DST | wgpu.TextureUsage.SAMPLED,
    dimension=wgpu.TextureDimension.d2,
    format=wgpu.TextureFormat.r8uint,
    mip_level_count=1,
    sample_count=1,
)
texture_view = texture.create_view()
tmp_buffer = device.create_buffer_with_data(
    data=texture_data, usage=wgpu.BufferUsage.COPY_SRC
)
command_encoder = device.create_command_encoder()
command_encoder.copy_buffer_to_texture(
    {
        "buffer": tmp_buffer,
        "offset": 0,
        "bytes_per_row": texture_data.strides[0],
        "rows_per_image": 0,
    },
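
This last excerpt also stops inside a copy_buffer_to_texture() call. Like write_texture(), it takes a source (buffer) dict, a destination (texture) dict and a copy size; the sketch below shows the complete call shape, reusing the names from the example, followed by a queue submit. It is an illustration of the pattern, not necessarily the verbatim code from cube_glfw.py.

command_encoder.copy_buffer_to_texture(
    {
        "buffer": tmp_buffer,
        "offset": 0,
        "bytes_per_row": texture_data.strides[0],
        "rows_per_image": 0,
    },
    {
        "texture": texture,
        "mip_level": 0,
        "origin": (0, 0, 0),
    },
    texture_size,  # copy size (width, height, depth)
)
device.default_queue.submit([command_encoder.finish()])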