How to use dlr - 10 common examples

To help you get started, we've selected ten dlr examples based on popular ways the package is used in public projects.

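All of the examples follow the same basic pattern: construct a dlr.DLRModel from a directory of compiled model artifacts, optionally pick a device, and call run() with either a single array or a dict keyed by input name. The sketch below illustrates that pattern with placeholder paths and shapes; it is not taken from any of the projects.

import numpy as np
import dlr

# Load a compiled model from a local directory (placeholder path);
# dev_type can be 'cpu', 'gpu', or 'opencl' depending on the target.
model = dlr.DLRModel('./compiled_model', dev_type='cpu', dev_id=0)

# Inspect the tensor names the compiled graph expects and produces.
print(model.get_input_names())
print(model.get_output_names())

# run() accepts a dict keyed by input name (or a single ndarray) and
# returns a list of output arrays.
out = model.run({'data': np.zeros((1, 3, 224, 224), dtype='float32')})
print(out[0].shape)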

Example 1: neo-ai/neo-ai-dlr/tests/python/unittest/test_tf_model.py (view on GitHub)
def test_tf_model(dev_type=None, dev_id=None):
    _generate_frozen_graph()
    model = DLRModel(FROZEN_GRAPH_PATH, dev_type, dev_id)
    inp_names = model.get_input_names()
    assert inp_names == ['import/input1:0', 'import/input2:0']

    out_names = model.get_output_names()
    assert out_names == ['import/preproc/output1:0', 'import/preproc/output2:0']

    inp1 = [[4., 1.], [3., 2.]]
    inp2 = [[0., 1.], [1., 0.]]

    res = model.run({'import/input1:0': inp1, 'import/input2:0': inp2})
    assert res is not None
    assert len(res) == 2
    assert np.alltrue(res[0] == [[36., 361.], [49.,  324.]])
    assert res[1] == 1

    m_inp1 = model.get_input('import/input1:0')
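
The snippet above is cut off mid-test, and it relies on names defined at the top of the file (FROZEN_GRAPH_PATH and _generate_frozen_graph are not shown here). If you adapt it, the only external imports it needs are numpy and DLRModel, for example:

import numpy as np
from dlr import DLRModel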
Example 2: neo-ai/neo-ai-dlr/tests/python/integration/load_and_run_treelite_model.py (view on GitHub)
def test_letor():
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xgboost-letor')
    data_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xgboost', 'letor.libsvm')
    model = DLRModel(model_path, 'cpu', 0)

    X, _ = load_svmlight_file(data_file, zero_based=True)
    expected = np.array([1.372033834457397461e+00, -2.448803186416625977e+00, 8.579480648040771484e-01,
                         1.369985580444335938e+00, -7.058695554733276367e-01, 4.134958684444427490e-01,
                         -2.247941017150878906e+00, -2.461995363235473633e+00, -2.394921064376831055e+00,
                         -1.191793322563171387e+00, 9.672126173973083496e-02, 2.687671184539794922e-01,
                         1.417675256729125977e+00, -1.832636356353759766e+00, -5.582004785537719727e-02,
                         -9.497703313827514648e-01, -1.219825387001037598e+00, 1.512521862983703613e+00,
                         -1.179921030998229980e-01, -2.383430719375610352e+00, -9.094548225402832031e-01])
    expected = expected.reshape((-1, 1))
    print('Testing inference on XGBoost LETOR...')
    assert np.allclose(model.run(_sparse_to_dense(X))[0], expected)
Example 3: neo-ai/neo-ai-dlr/tests/python/integration/load_and_run_treelite_model.py (view on GitHub)
def test_mnist():
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xgboost-mnist')
    data_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xgboost', 'mnist.libsvm')
    model = DLRModel(model_path, 'cpu', 0)

    X, _ = load_svmlight_file(data_file, zero_based=True)
    print('Testing inference on XGBoost MNIST...')
    assert model.run(_sparse_to_dense(X))[0] == 7.0
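
Both Treelite tests above depend on a _sparse_to_dense helper defined elsewhere in the file, plus load_svmlight_file from scikit-learn. The helper's implementation is not shown; a minimal stand-in that matches how it is used here (densify the sparse matrix returned by load_svmlight_file into the float32 array DLR expects) could look like this:

import numpy as np
from sklearn.datasets import load_svmlight_file
from dlr import DLRModel

def _sparse_to_dense(X):
    # Convert the scipy CSR matrix from load_svmlight_file into a dense
    # float32 ndarray; DLR's run() works on dense NumPy arrays.
    return X.toarray().astype(np.float32)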
Example 4: neo-ai/neo-ai-dlr/container/neo_template_mxnet_byom.py (view on GitHub)
        if script_name is None:
            raise RuntimeError('{} contains no *.py file'.format(source_tar))
        cur_dir = tempdir
        script_path = script_name[:-3]
        if '/' in script_path:
            file_depth = len(script_path.split('/')) - 1
            for i in range(file_depth):
                cur_dir = os.path.join(cur_dir, script_name[:-3].split('/')[i])
            script_path = script_path.split('/')[file_depth]
        self.user_module = import_user_module(cur_dir, script_path)

        USE_GPU = os.getenv('USE_GPU', None)
        if USE_GPU == '1':
            self.model = dlr.DLRModel(model_dir, dev_type='gpu')
        else:
            self.model = dlr.DLRModel(model_dir)
        self.input_names = self.model.get_input_names()
        self.initialized = True
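
In the container snippet above, the device is chosen from a USE_GPU environment variable at initialization time. To exercise the GPU path outside the container you would set the variable before the handler is constructed, which is equivalent to loading the model directly with dev_type='gpu' (the model directory below is a placeholder):

import os
import dlr

os.environ['USE_GPU'] = '1'                                 # makes the handler pick dev_type='gpu'
model = dlr.DLRModel('/path/to/model_dir', dev_type='gpu')  # the equivalent direct call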
Example 5: neo-ai/neo-ai-dlr/demo/aisage/run_yolo_gluoncv.py (view on GitHub)
    return class_IDs, scores, bounding_boxes


######################################################################
# Load, resize and normalize demo image
x = open_and_norm_image_cv2('street_small.jpg')
#x = open_and_norm_image_pil('street_small.jpg')

model_path = "models/yolov3_darknet53"
print(model_path)

dshape = (1, 3, 300, 300)
dtype = "float32"

device = 'opencl'
m = dlr.DLRModel(model_path, device)

# warmup
print("Warm up....")
run(m, x)

# run
N = 10
print("Run")
for i in range(N):
  t1 = ms()
  class_IDs, scores, bboxes = run(m, x)
  t2 = ms()
  print("Inference time: {:,} ms".format(t2 - t1))

######################################################################
# Dump results
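
The demo scripts time each inference with an ms() helper and measure memory with mem_usage(), neither of which appears in the excerpts. A minimal stand-in for ms(), assuming it simply returns wall-clock milliseconds, would be:

import time

def ms():
    # Wall-clock time in integer milliseconds, used to time each run() call.
    return int(round(time.time() * 1000))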
Example 6: neo-ai/neo-ai-dlr/container/neo_template_image_classification.py (view on GitHub)
        # Load shape info
        self.shape_info = None
        for aux_file in glob.glob(os.path.join(model_dir, '*.json')):
            if os.path.basename(aux_file) == SHAPES_FILE:
                try:
                    with open(aux_file, 'r') as f:
                        self.shape_info = json.load(f)
                except Exception as e:
                    raise Exception('Error parsing shape info')
        if self.shape_info is None:
            raise Exception('Shape info must be given as {}'.format(SHAPES_FILE))

        USE_GPU = os.getenv('USE_GPU', None)
        if USE_GPU == '1':
            self.model = dlr.DLRModel(model_dir, dev_type='gpu')
        else:
            self.model = dlr.DLRModel(model_dir)
        self.input_names = self.model.get_input_names()
        self.initialized = True
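
SHAPES_FILE is a constant defined earlier in the container script naming the JSON file that describes the model's input shapes; the loader above only requires that the file parse as JSON. A hypothetical file of the following form would satisfy it (the filename and schema here are assumptions, not the Neo container's documented format):

import json

# Hypothetical example only: write a shape-info file like the one the
# loader above looks for, mapping each input name to its shape.
with open('model-shapes.json', 'w') as f:
    json.dump({'data': [1, 3, 224, 224]}, f)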
Example 7: neo-ai/neo-ai-dlr/demo/aisage/run-mxnet-ssd-mobilenet-512.py (view on GitHub)
    img = np.expand_dims(img, axis=0)
    return orig_img, img

model_path = "models/mxnet-ssd-mobilenet-512"
print(model_path)

class_names = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair",
               "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant",
               "sheep", "sofa", "train", "tvmonitor"]

######################################################################
# Create TVM runtime and do inference

# Build DLR runtime
device = 'opencl'
m = dlr.DLRModel(model_path, device)

orig_img, img_data = open_and_norm_image(test_image)
input_data = img_data.astype(dtype)

# dryrun
print("Warming up...")
m.run(input_data)
# execute
print("Running...")
N = 10
for i in range(N):
    t1 = ms()
    m_out = m.run(input_data)
    # get outputs
    out = m_out[0][0]
    t2 = ms()
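
The loop above keeps only m_out[0][0], the detection matrix for the first image in the batch. For MXNet SSD models each row is typically [class_id, score, xmin, ymin, xmax, ymax]; a hedged sketch of turning that into readable output with the class_names list defined earlier (the 0.5 threshold is my choice, not the script's):

for det in out:
    cls_id, score = int(det[0]), float(det[1])
    if cls_id < 0 or score < 0.5:   # skip padding rows and weak detections
        continue
    print(class_names[cls_id], round(score, 3), det[2:6])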
Example 8: neo-ai/neo-ai-dlr/container/neo_template_image_classification.py (view on GitHub)
        self.shape_info = None
        for aux_file in glob.glob(os.path.join(model_dir, '*.json')):
            if os.path.basename(aux_file) == SHAPES_FILE:
                try:
                    with open(aux_file, 'r') as f:
                        self.shape_info = json.load(f)
                except Exception as e:
                    raise Exception('Error parsing shape info')
        if self.shape_info is None:
            raise Exception('Shape info must be given as {}'.format(SHAPES_FILE))

        USE_GPU = os.getenv('USE_GPU', None)
        if USE_GPU == '1':
            self.model = dlr.DLRModel(model_dir, dev_type='gpu')
        else:
            self.model = dlr.DLRModel(model_dir)
        self.input_names = self.model.get_input_names()
        self.initialized = True
Example 9: neo-ai/neo-ai-dlr/demo/aisage/run-ssd.py (view on GitHub)
        score = scores[i][j]
        if score < 0.5:
            continue
        box = boxes[i][j]
        print("  ", cl_id, label, score, box)


inp_files = ['dogs.jpg']
inp = get_input(inp_files, (300, 300))

print("model:", model_path)
print(inp.shape, inp.dtype)

mem_usage()

m = dlr.DLRModel(model_path)
mem_usage()
print("input names:", m.get_input_names())
print("output names:", m.get_output_names())
# dryrun
print("dryrun...")
m.run({input_tensor_name: inp})
mem_usage()

N = 10
durations = []
for i in range(N):
    print(i+1, "m.run...")
    t1 = ms()
    res = m.run({input_tensor_name: inp})
    t2 = ms()
    mem_usage()
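
The excerpt stops before the collected durations list is used. A simple summary consistent with the script's other prints might be (my own sketch, assuming per-run milliseconds are appended to durations inside the loop):

import numpy as np

print("Avg: {:.1f} ms, min: {:.1f} ms, max: {:.1f} ms".format(
    np.mean(durations), np.min(durations), np.max(durations)))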
Example 10: neo-ai/neo-ai-dlr/demo/aisage/run-mxnet-ssd-resnet50-512.py (view on GitHub)
    img -= np.array([123, 117, 104])
    img = np.transpose(np.array(img), (2, 0, 1))
    img = np.expand_dims(img, axis=0)
    return orig_img, img

model_path = "models/mxnet-ssd-resnet50-512"
print(model_path)

class_names = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair",
               "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant",
               "sheep", "sofa", "train", "tvmonitor"]

######################################################################
# Load the model
device = 'opencl'
m = dlr.DLRModel(model_path, device)

orig_img, img_data = open_and_norm_image(test_image)
input_data = img_data.astype(dtype)

# dryrun
print("Warming up...")
m.run(input_data)
# execute
print("Running...")
N = 10
for i in range(N):
    t1 = ms()
    m_out = m.run(input_data)
    # get outputs
    out = m_out[0][0]
    t2 = ms()

About dlr

dlr is a common runtime for machine learning models compiled by AWS SageMaker Neo, TVM, or TreeLite, published under the Apache-2.0 license.