How to use tfx - 10 common examples

To help you get started, we've selected ten tfx examples based on popular ways the package is used in public projects. Note that the snippets come from two unrelated packages that share the import name: the GPIS robotics examples use tfx as a pose/transform helper library (tfx.pose), while the tensorflow/tfx examples use TensorFlow Extended's pipeline API.


github jeffmahler/GPIS/src/grasp_selection/test_zeke_registration.py (View on Github)
    T_world = stf.SimilarityTransform3D(from_frame='world', to_frame='world')
    R_table_world = np.eye(3)
    T_table_world = stf.SimilarityTransform3D(pose=tfx.pose(R_table_world, np.zeros(3)), from_frame='world', to_frame='table')
            
    R_camera_table = np.load('data/calibration/rotation_camera_cb.npy')
    t_camera_table = np.load('data/calibration/translation_camera_cb.npy')
    cb_points_camera = np.load('data/calibration/corners_cb.npy')
    T_camera_table = stf.SimilarityTransform3D(tfx.pose(R_camera_table, t_camera_table), from_frame='table', to_frame='camera')
    T_camera_world = T_camera_table.dot(T_table_world)
    T_world_camera = T_camera_world.inverse()
    
    R_stp_obj = stable_pose.r
    T_obj_stp = stf.SimilarityTransform3D(pose=tfx.pose(R_stp_obj.T, np.zeros(3)), from_frame='stp', to_frame='obj')
            
    t_stp_table = np.array([0, 0, z])
    T_stp_table = stf.SimilarityTransform3D(pose=tfx.pose(np.eye(3), t_stp_table), from_frame='table', to_frame='stp')
    
    T_obj_world = T_obj_camera.dot(T_camera_world)
            
    # visualize the robot's understanding of the world
    logging.info('Displaying robot world state')
    mv.clf()
    mvis.MayaviVisualizer.plot_table(T_table_world, d=table_extent)
    mvis.MayaviVisualizer.plot_pose(T_world, alpha=alpha, tube_radius=tube_radius, center_scale=center_scale)
    mvis.MayaviVisualizer.plot_pose(T_obj_world, alpha=alpha, tube_radius=tube_radius, center_scale=center_scale)
    mvis.MayaviVisualizer.plot_pose(T_camera_world, alpha=alpha, tube_radius=tube_radius, center_scale=center_scale)
    mvis.MayaviVisualizer.plot_mesh(object_mesh, T_obj_world)
    mvis.MayaviVisualizer.plot_point_cloud(cb_points_camera, T_world_camera, color=(1,1,0))
    mvis.MayaviVisualizer.plot_point_cloud(points_3d, T_world_camera, color=(0,1,0), scale=0.0025)
    mv.view(focalpoint=(0,0,0))
    mv.show()
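
The three GPIS excerpts on this page all lean on the same small pattern: tfx.pose(R, t) builds a pose from a rotation matrix and a translation vector, stf.SimilarityTransform3D wraps that pose and tags it with a source and target frame, and .dot() / .inverse() compose and invert the resulting transforms. The sketch below shows that pattern in isolation; it is a minimal illustration, the stf import path is an assumption (the excerpt does not show its imports), and the identity calibration values are placeholders for the .npy files loaded above.

import numpy as np
import tfx                        # transform/pose helper library used by GPIS (provides tfx.pose)
import similarity_tf as stf       # assumed module name for SimilarityTransform3D; not shown in the excerpt

# Identity transform for the world frame.
T_world = stf.SimilarityTransform3D(from_frame='world', to_frame='world')

# Table frame: identity rotation and zero translation relative to world.
T_table_world = stf.SimilarityTransform3D(
    pose=tfx.pose(np.eye(3), np.zeros(3)),
    from_frame='world', to_frame='table')

# Camera extrinsics (placeholders standing in for the calibration .npy files).
R_camera_table = np.eye(3)
t_camera_table = np.zeros(3)
T_camera_table = stf.SimilarityTransform3D(
    tfx.pose(R_camera_table, t_camera_table),
    from_frame='table', to_frame='camera')

# Compose and invert exactly as the excerpt does.
T_camera_world = T_camera_table.dot(T_table_world)
T_world_camera = T_camera_world.inverse()
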
github jeffmahler/GPIS/src/grasp_selection/test_zeke_registration.py (View on Github)
    T_camera_obj.to_frame = 'camera'
    T_obj_camera = T_camera_obj.inverse()    

    # save depth and color images
    min_d = np.min(depth_im)
    max_d = np.max(depth_im)
    depth_im2 = 255.0 * (depth_im - min_d) / (max_d - min_d)
    depth_im2 = Image.fromarray(depth_im2.astype(np.uint8))
    filename = 'depth.png'
    depth_im2.save(os.path.join(logging_dir, filename))
    color_im2 = Image.fromarray(color_im)
    filename = 'color.png'
    color_im2.save(os.path.join(logging_dir, filename))

    # transform the mesh to the stable pose to get a z offset from the table
    T_obj_stp = stf.SimilarityTransform3D(pose=tfx.pose(stable_pose.r)) 
    object_mesh = graspable.mesh
    object_mesh_tf = object_mesh.transform(T_obj_stp)
    mn, mx = object_mesh_tf.bounding_box()
    z = mn[2]
    
    # define poses of camera, table, object, etc.
    T_world = stf.SimilarityTransform3D(from_frame='world', to_frame='world')
    R_table_world = np.eye(3)
    T_table_world = stf.SimilarityTransform3D(pose=tfx.pose(R_table_world, np.zeros(3)), from_frame='world', to_frame='table')
            
    R_camera_table = np.load('data/calibration/rotation_camera_cb.npy')
    t_camera_table = np.load('data/calibration/translation_camera_cb.npy')
    cb_points_camera = np.load('data/calibration/corners_cb.npy')
    T_camera_table = stf.SimilarityTransform3D(tfx.pose(R_camera_table, t_camera_table), from_frame='table', to_frame='camera')
    T_camera_world = T_camera_table.dot(T_table_world)
    T_world_camera = T_camera_world.inverse()
github jeffmahler/GPIS/src/grasp_selection/test_zeke_registration.py (View on Github)
    # transform the mesh to the stable pose to get a z offset from the table
    T_obj_stp = stf.SimilarityTransform3D(pose=tfx.pose(stable_pose.r)) 
    object_mesh = graspable.mesh
    object_mesh_tf = object_mesh.transform(T_obj_stp)
    mn, mx = object_mesh_tf.bounding_box()
    z = mn[2]
    
    # define poses of camera, table, object, etc.
    T_world = stf.SimilarityTransform3D(from_frame='world', to_frame='world')
    R_table_world = np.eye(3)
    T_table_world = stf.SimilarityTransform3D(pose=tfx.pose(R_table_world, np.zeros(3)), from_frame='world', to_frame='table')
            
    R_camera_table = np.load('data/calibration/rotation_camera_cb.npy')
    t_camera_table = np.load('data/calibration/translation_camera_cb.npy')
    cb_points_camera = np.load('data/calibration/corners_cb.npy')
    T_camera_table = stf.SimilarityTransform3D(tfx.pose(R_camera_table, t_camera_table), from_frame='table', to_frame='camera')
    T_camera_world = T_camera_table.dot(T_table_world)
    T_world_camera = T_camera_world.inverse()
    
    R_stp_obj = stable_pose.r
    T_obj_stp = stf.SimilarityTransform3D(pose=tfx.pose(R_stp_obj.T, np.zeros(3)), from_frame='stp', to_frame='obj')
            
    t_stp_table = np.array([0, 0, z])
    T_stp_table = stf.SimilarityTransform3D(pose=tfx.pose(np.eye(3), t_stp_table), from_frame='table', to_frame='stp')
    
    T_obj_world = T_obj_camera.dot(T_camera_world)
            
    # visualize the robot's understanding of the world
    logging.info('Displaying robot world state')
    mv.clf()
    mvis.MayaviVisualizer.plot_table(T_table_world, d=table_extent)
    mvis.MayaviVisualizer.plot_pose(T_world, alpha=alpha, tube_radius=tube_radius, center_scale=center_scale)
github tensorflow/tfx/tfx/tools/cli/testdata/test_pipeline_kubeflow_1.py (View on Github)
def _create_pipeline():
  pipeline_name = _PIPELINE_NAME
  test_output_dir = 'gs://{}/test_output'.format(test_utils.BUCKET_NAME)
  pipeline_root = os.path.join(test_output_dir, pipeline_name)
  components = test_utils.create_e2e_components(pipeline_root,
                                                test_utils.DATA_ROOT,
                                                test_utils.TAXI_MODULE_FILE)
  return tfx_pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      metadata_connection_config=metadata_store_pb2.ConnectionConfig(),
      components=components,
      log_root='/var/tmp/tfx/logs',
      additional_pipeline_args={
          'WORKFLOW_ID': pipeline_name,
      },
  )
github tensorflow/tfx/tfx/tools/cli/testdata/test_pipeline_kubeflow_3.py (View on Github)
def _create_pipeline():
  pipeline_name = _PIPELINE_NAME
  test_output_dir = 'gs://{}/test_output'.format(test_utils.BUCKET_NAME)
  pipeline_root = os.path.join(test_output_dir, pipeline_name)
  components = test_utils.create_e2e_components(pipeline_root,
                                                test_utils.DATA_ROOT,
                                                test_utils.TAXI_MODULE_FILE)
  return tfx_pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      metadata_connection_config=metadata_store_pb2.ConnectionConfig(),
      components=components[:4],
      log_root='/var/tmp/tfx/logs',
      additional_pipeline_args={
          'WORKFLOW_ID': pipeline_name,
      },
  )
github tensorflow/tfx/tfx/tools/cli/testdata/test_pipeline_beam_1.py (View on Github)
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     metadata_path: Text) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX."""
  examples = external_input(data_root)

  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input=examples)

  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])

  # Generates schema based on statistics files.
  infer_schema = SchemaGen(statistics=statistics_gen.outputs['statistics'])

  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[example_gen, statistics_gen, infer_schema],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      additional_pipeline_args={},
  )
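
The Beam testdata above only defines _create_pipeline; to execute it, the returned pipeline object still has to be handed to a runner. Below is a hedged sketch of how such a pipeline is typically launched locally with TFX's Beam runner. The BeamDagRunner import matches the 0.x-era API that these testdata files target, and the names and paths are placeholders rather than values from the excerpt.

import os
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner

# Placeholder locations for illustration only.
_pipeline_name = 'chicago_taxi_beam'
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
_data_root = os.path.join(_tfx_root, 'data', 'simple')
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name, 'metadata.db')

BeamDagRunner().run(
    _create_pipeline(
        pipeline_name=_pipeline_name,
        pipeline_root=_pipeline_root,
        data_root=_data_root,
        metadata_path=_metadata_path))
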
github tensorflow/tfx/tfx/tools/cli/testdata/test_pipeline_airflow_2.py (View on Github)
def _create_pipeline():
  """Implements the chicago taxi pipeline with TFX."""
  examples = csv_input(_data_root)

  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input=examples)

  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])

  # Generates schema based on statistics files.
  infer_schema = SchemaGen(statistics=statistics_gen.outputs['statistics'])

  return pipeline.Pipeline(
      pipeline_name='chicago_taxi_simple',
      pipeline_root=_pipeline_root,
      components=[
          example_gen, statistics_gen, infer_schema
      ],
      enable_cache=True,
      metadata_db_root=_metadata_db_root,
  )
github tensorflow/tfx/tfx/orchestration/kubeflow/test_utils.py (View on Github)
      schema=infer_schema.outputs['schema'])
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=infer_schema.outputs['schema'],
      module_file=taxi_module_file)
  trainer = Trainer(
      module_file=taxi_module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=infer_schema.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10),
      eval_args=trainer_pb2.EvalArgs(num_steps=5))
  model_analyzer = Evaluator(
      examples=example_gen.outputs['examples'],
      model_exports=trainer.outputs['model'],
      feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
          evaluator_pb2.SingleSlicingSpec(
              column_for_slicing=['trip_start_hour'])
      ]))
  model_validator = ModelValidator(
      examples=example_gen.outputs['examples'], model=trainer.outputs['model'])
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=model_validator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=os.path.join(pipeline_root, 'model_serving'))))

  return [
      example_gen, statistics_gen, infer_schema, validate_stats, transform,
      trainer, model_analyzer, model_validator, pusher
  ]
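
The list returned here is the full Chicago Taxi component chain, ExampleGen through Pusher. The Kubeflow testdata snippets earlier on this page show the other half of the hand-off: a list like this is passed straight to tfx_pipeline.Pipeline as its components argument. The sketch below restates that hand-off, assuming this excerpt is the body of a components builder such as the create_e2e_components called in the CLI testdata; only constructor arguments that appear in those snippets are used, and the pipeline name is hypothetical.

components = test_utils.create_e2e_components(pipeline_root,
                                              test_utils.DATA_ROOT,
                                              test_utils.TAXI_MODULE_FILE)

pipeline = tfx_pipeline.Pipeline(
    pipeline_name='taxi_pipeline_kubeflow',   # hypothetical name
    pipeline_root=pipeline_root,              # e.g. a gs://<bucket>/test_output path
    metadata_connection_config=metadata_store_pb2.ConnectionConfig(),
    components=components,
    additional_pipeline_args={
        'WORKFLOW_ID': 'taxi_pipeline_kubeflow',
    },
)
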
github tensorflow/tfx/tfx/tools/cli/e2e/test_utils.py (View on Github)
      transform_output=transform.outputs['transform_output'],
      train_args=trainer_pb2.TrainArgs(num_steps=10),
      eval_args=trainer_pb2.EvalArgs(num_steps=5))
  model_analyzer = Evaluator(
      examples=example_gen.outputs['examples'],
      model_exports=trainer.outputs['output'],
      feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
          evaluator_pb2.SingleSlicingSpec(
              column_for_slicing=['trip_start_hour'])
      ]))
  model_validator = ModelValidator(
      examples=example_gen.outputs['examples'], model=trainer.outputs['output'])
  pusher = Pusher(
      model_export=trainer.outputs['output'],
      model_blessing=model_validator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=os.path.join(pipeline_root, 'model_serving'))))

  return [
      example_gen, statistics_gen, infer_schema, validate_stats, transform,
      trainer, model_analyzer, model_validator, pusher
  ]
github tensorflow/tfx/tfx/tools/cli/e2e/test_utils.py (View on Github)
  transform = Transform(
      input_data=example_gen.outputs['examples'],
      schema=infer_schema.outputs['output'],
      module_file=taxi_module_file)
  trainer = Trainer(
      module_file=taxi_module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=infer_schema.outputs['output'],
      transform_output=transform.outputs['transform_output'],
      train_args=trainer_pb2.TrainArgs(num_steps=10),
      eval_args=trainer_pb2.EvalArgs(num_steps=5))
  model_analyzer = Evaluator(
      examples=example_gen.outputs['examples'],
      model_exports=trainer.outputs['output'],
      feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
          evaluator_pb2.SingleSlicingSpec(
              column_for_slicing=['trip_start_hour'])
      ]))
  model_validator = ModelValidator(
      examples=example_gen.outputs['examples'], model=trainer.outputs['output'])
  pusher = Pusher(
      model_export=trainer.outputs['output'],
      model_blessing=model_validator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=os.path.join(pipeline_root, 'model_serving'))))

  return [
      example_gen, statistics_gen, infer_schema, validate_stats, transform,
      trainer, model_analyzer, model_validator, pusher
  ]
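
Note that the two tfx/tools/cli/e2e/test_utils.py excerpts use the older TFX argument and output names (input_data=, transform_output=, model_export=, trainer.outputs['output']), while the tfx/orchestration/kubeflow/test_utils.py excerpt uses the newer ones (examples=, transform_graph=, model=, trainer.outputs['model']); which set applies depends on the TFX version you are pinned to. Either way, the assembled component list ends up in a Pipeline that is compiled for its orchestrator. A hedged sketch of the Kubeflow case follows; the runner import is the 0.x-era API, and pipeline stands for a tfx_pipeline.Pipeline built from the components above.

from tfx.orchestration.kubeflow import kubeflow_dag_runner

# Compiles the pipeline into a Kubeflow Pipelines package (an Argo workflow
# archive) that can be uploaded to and run on a Kubeflow Pipelines cluster.
kubeflow_dag_runner.KubeflowDagRunner().run(pipeline)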