How to use the taichi.var function in taichi

To help you get started, we've selected a few taichi examples based on popular ways it is used in public projects. Note that ti.var is the tensor-declaration API of legacy (pre-1.0) Taichi; current releases expose the same concept as ti.field.
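
Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share (hedged: it assumes the legacy 0.x API used throughout this page, where ti.var declares a tensor and a @ti.layout function gives it a shape):

import taichi as ti

x = ti.var(ti.f32)  # declare a scalar f32 tensor; its shape comes later

@ti.layout
def place():
  ti.root.dense(ti.i, 8).place(x)  # materialize x as a dense 1D array of 8 elements

@ti.kernel
def fill():
  for i in x:  # struct-for: iterates over every element of x
    x[i] = 2.0 * i

fill()
print(x[3])  # 6.0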


github yuanming-hu/taichi · tests/python/test_struct.py
def test_linear_nested():
  x = ti.var(ti.i32)
  y = ti.var(ti.i32)

  n = 128

  @ti.layout
  def place():
    ti.root.dense(ti.i, n // 16).dense(ti.i, 16).place(x)
    ti.root.dense(ti.i, n // 16).dense(ti.i, 16).place(y)

  for i in range(n):
    x[i] = i
    y[i] = i + 123

  for i in range(n):
    assert x[i] == i
    assert y[i] == i + 123
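
The nested layout above groups each 128-element tensor into 8 blocks of 16 contiguous entries. Element-wise it stores the same data as a single flat level, which the test_basics.py snippet further down uses:

ti.root.dense(ti.i, n).place(x)  # flat, one-level equivalent in element count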

github yuanming-hu/taichi · examples/taichi_logo.py
  # (excerpt from the body of inside_taichi(p); ret starts at -1 and is set at most once)
  if inside(p, Vector2(0.50, 0.25), 0.25):
    if ret == -1:
      ret = 0
  if inside(p, Vector2(0.50, 0.75), 0.25):
    if ret == -1:
      ret = 1
  if p[0] < 0.5:
    if ret == -1:
      ret = 1
  else:
    if ret == -1:
      ret = 0
  return ret


x = ti.var(ti.f32)

n = 512
ti.cfg.use_llvm = True  # legacy config flag selecting the LLVM backend


@ti.layout
def layout():
  ti.root.dense(ti.ij, n).place(x)


@ti.kernel
def paint():
  for i in range(n * 4):
    for j in range(n * 4):
      ret = 1.0 - inside_taichi(Vector2(1.0 * i / n / 4, 1.0 * j / n / 4))
      x[i // 4, j // 4] += ret / 16
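
The kernel takes 16 supersamples per output cell (a 4 × 4 grid, each weighted 1/16), so each x[i, j] ends up holding an anti-aliased intensity in [0, 1]. Once materialized, the result can be read back from the host like any other tensor:

paint()
print(x[n // 2, n // 2])  # averaged intensity of the center pixel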

github yuanming-hu/taichi · tests/python/test_ad_atomic.py
def test_ad_reduce():
  x = ti.var(ti.f32)
  loss = ti.var(ti.f32)

  N = 16

  @ti.layout
  def place():
    ti.root.place(loss, loss.grad).dense(ti.i, N).place(x, x.grad)

  @ti.kernel
  def func():
    for i in x:
      loss.atomic_add(ti.sqr(x[i]))

  total_loss = 0
  for i in range(N):
    x[i] = i
    total_loss += i * i
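
The snippet cuts off before the backward pass. Under the same legacy API the gradient of a reduction like this is usually driven with ti.Tape; a hedged sketch of how the test likely continues:

with ti.Tape(loss):
  func()

assert loss[None] == total_loss
for i in range(N):
  assert x.grad[i] == 2 * i  # d/dx_i of sum over x_i^2 is 2 * x_i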

github yuanming-hu/taichi · tests/python/test_basics.py
def test_simple():
  x = ti.var(ti.i32)

  n = 128

  @ti.layout
  def place():
    ti.root.dense(ti.i, n).place(x)

  @ti.kernel
  def func():
    x[7] = 120

  func()

  for i in range(n):
    if i == 7:
      assert x[i] == 120
    else:
      assert x[i] == 0  # untouched entries keep their zero-initialized value

github yuanming-hu/taichi · tests/python/test_complex_kernels.py
def __init__(self):
  self.x = ti.var(ti.f32)
  self.total = ti.var(ti.f32)
  self.n = 128

github yuanming-hu/taichi · tests/python/test_arg_load.py
def test_arg_load():
  x = ti.var(ti.i32)
  y = ti.var(ti.f32)

  @ti.layout
  def layout():
    ti.root.place(x, y)

  @ti.kernel
  def set_i32(v: ti.i32):
    x[None] = v

  @ti.kernel
  def set_f32(v: ti.f32):
    y[None] = v

  @ti.kernel
  def set_f64(v: ti.f64):
    y[None] = v  # body assumed by analogy with set_f32; the source snippet is cut off here
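
Called from the host, each kernel stores its runtime argument into the placed tensor; a brief usage sketch (the assertions are ours, for illustration):

set_i32(123)
assert x[None] == 123
set_f32(1.5)
assert y[None] == 1.5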

github yuanming-hu/taichi · tests/python/test_kernel_templates.py
def test_kernel_template_basic():
  x = ti.var(ti.i32)
  y = ti.var(ti.f32)

  n = 16

  @ti.layout
  def layout():
    ti.root.dense(ti.i, n).place(x, y)

  @ti.kernel
  def inc(a: ti.template(), b: ti.template()):
    for i in a:
      a[i] += b

  inc(x, 1)
  inc(y, 2)

  for i in range(n):
    assert x[i] == 1  # zero-initialized, then incremented once by inc(x, 1)
    assert y[i] == 2  # likewise via inc(y, 2)

github yuanming-hu/taichi · examples/mlp.py
import numpy as np
import random
import taichi as ti
import pickle

# ti.runtime.print_preprocessed = True
# ti.cfg.print_ir = True

input = ti.var(ti.f32)

weight1 = ti.var(ti.f32)
output1 = ti.var(ti.f32)
output1_nonlinear = ti.var(ti.f32)

weight2 = ti.var(ti.f32)
output = ti.var(ti.f32)
output_exp = ti.var(ti.f32)
output_softmax = ti.var(ti.f32)
softmax_sum = ti.var(ti.f32)
gt = ti.var(ti.f32)
loss = ti.var(ti.f32)
learning_rate = ti.var(ti.f32)

n_input = 28**2
n_hidden = 500
n_output = 10
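
The excerpt stops before the layer tensors are placed. As a hedged sketch only, the shapes below are inferred from n_input/n_hidden/n_output and are our assumption, not the example's actual layout:

@ti.layout
def layout():
  ti.root.dense(ti.i, n_input).place(input)
  ti.root.dense(ti.i, n_input).dense(ti.j, n_hidden).place(weight1)
  ti.root.dense(ti.i, n_hidden).place(output1, output1_nonlinear)
  ti.root.dense(ti.i, n_hidden).dense(ti.j, n_output).place(weight2)
  ti.root.dense(ti.i, n_output).place(output, output_exp, output_softmax, gt)
  ti.root.place(softmax_sum, loss, learning_rate)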

github yuanming-hu/taichi · examples/difftaichi/rigid_body_discountinuity.py
real = ti.f32  # real is defined earlier in the source file (f32 in the difftaichi examples)

scalar = lambda: ti.var(dt=real)  # factory: each call declares a fresh scalar tensor
vec = lambda: ti.Vector(2, dt=real)  # factory: each call declares a fresh 2D vector tensor
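
Wrapping ti.var in zero-argument lambdas is a common difftaichi idiom: each call mints a new, not-yet-shaped tensor, so related fields can be declared tersely. A hedged usage sketch (the names and the size 256 are illustrative, not from the example):

x = vec()            # one 2D vector per particle, e.g. position
v = vec()            # velocity
inv_mass = scalar()  # per-particle scalar

@ti.layout
def place():
  ti.root.dense(ti.i, 256).place(x, v, inv_mass)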