How to use the rospkg.RosPack class in rospkg

To help you get started, we’ve selected a few rospkg examples based on popular ways it is used in public projects.

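Before diving into the project snippets below, here is a minimal, self-contained sketch of the typical pattern: create one rospkg.RosPack() instance and call get_path() to locate a package on disk. The package name 'my_package' and the file 'config/settings.yaml' are placeholders, not taken from any of the projects shown here.

import os
import rospkg

# A RosPack instance caches its crawl of the ROS package path,
# so create it once and reuse it for all lookups.
rospack = rospkg.RosPack()

try:
    pkg_dir = rospack.get_path('my_package')  # hypothetical package name
except rospkg.ResourceNotFound:
    pkg_dir = None
    print("Package 'my_package' is not on the ROS package path")

if pkg_dir is not None:
    config_file = os.path.join(pkg_dir, 'config', 'settings.yaml')  # placeholder file layout
    print("Resolved config file:", config_file)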

github rapp-project / rapp-platform / rapp_testing_tools / scripts / default_tests / exclude / speech_detection_sphinx_test_headset_deutera.py
def __init__(self):
    rospack = rospkg.RosPack()
    pkgDir = rospack.get_path('rapp_testing_tools')
    audioFile = path.join(pkgDir, 'test_data', 'deutera.wav')

    self.msg = SpeechRecognitionSphinx(
        language='el',
        audio_source='headset',
        words=[u'Δευτέρα'],
        sentences=[u'Δευτέρα'],
        grammar=[u'Δευτέρα'],
        audiofile=audioFile)

    self.svc = RappPlatformService(self.msg)

    self.valid_words_found = [u'Δευτέρα']
github rapp-project / rapp-platform / rapp_testing_tools / scripts / default_tests / exclude / qr_detection_test_hard_medium.py
def __init__(self):
    rospack = rospkg.RosPack()
    pkgDir = rospack.get_path('rapp_testing_tools')
    imagepath = path.join(pkgDir, 'test_data','qr_samples', 'hardMediumQr.jpg')

    self.msg = QrDetection(imageFilepath=imagepath)
    self.svc = RappPlatformService(msg=self.msg)

    self.valid_results = {
        'qr_centers': [{'y': 543, 'x': 661}],
        'qr_messages': ['This QR will be used to check the algorithmic robustness as it is quite small'],
        'error': ''
    }
github jredmondson / gams / scripts / linux / generate_schemas.py
def get_rosmsg_paths(types):
    """
    Gets a list of ROS message paths as they exist on your system

    Args:
        types: A list of ROS Message types in the format package/Type

    Returns:
        A dict mapping each ROS message type to the path of its .msg file (map[type] = path)
    """

    rospack = rospkg.RosPack()
    found_pkgs = rosmsg.iterate_packages(rospack, '.msg')

    rosmsg_type_path_map = {}
    dir_map = {}
    type_map = {}

    # This associates a type with a package name.
    for full_type in types:
        package_name = full_type.split('/')[0]
        print "Searching package %s ... " % package_name
        base_type = full_type.split('/')[1]
        type_map[base_type] = package_name
        
    # This associates a package name with a msg path
    for p, path in found_pkgs:
        #print "Msg path for package %s found at %s" % (p, path)
github olinrobotics / irl / scripts / Interactions / _oldInteractions / emotive_brain.py
def create_behaviors(self):
    rospack = rospkg.RosPack()
    curr_dir = rospack.get_path("edwin")

    # Load pickled behaviors relative to the package directory (binary mode for pickle)
    self.behaviors = pickle.load(open(curr_dir + '/params/' + str(self.personality) + '_behaviors.txt', 'rb'))

    self.cat_behaviors['negative_emotions'] = ['angry', 'sad']
    self.cat_behaviors['happy_emotions'] = ['nod', 'butt_wiggle']
    self.cat_behaviors['greeting'] = ['greet', 'nudge', 'curiosity']
    self.cat_behaviors['pretentious'] = ['gloat']
    self.cat_behaviors['calm'] = ['sleep', 'nudge', 'nod']

    self.pat = False
    self.slap = False

    self.exit = False  # should be a catch-all to exit all long-running commands
    self.start_game = None
github fpt-corp / DiRa / Reference / Source code final 2018-2019 / MTA-R4F-Digital_race_2019 / src_25_5_13_56 / r4f / src / modules / Camera.py
import cv2
import numpy as np
import rospkg 
from primesense import openni2#, nite2
from primesense import _openni2 as c_api
import config as cf
import time

rospack = rospkg.RosPack()
path = rospack.get_path('r4f')
openni2.initialize(path+'/src/modules') #
dev = openni2.Device.open_any()
rgb_stream = dev.create_color_stream()
rgb_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888, resolutionX=640, resolutionY=480, fps=30))
rgb_stream.start()
depth_stream = dev.create_depth_stream()
depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM, resolutionX = 320, resolutionY = 240, fps = 30))
depth_stream.start()

def get_rgb():
    print("get_rgb started!")
    while cf.running:
        # Read one RGB frame from the OpenNI stream and convert BGR -> RGB
        # (np.frombuffer replaces the deprecated np.fromstring for raw byte buffers)
        bgr = np.frombuffer(rgb_stream.read_frame().get_buffer_as_uint8(), dtype=np.uint8).reshape(480, 640, 3)
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        rgb = cv2.resize(rgb, (480, 320))
github nickcharron / waypoint_nav / outdoor_waypoint_nav / scripts / joy_launch_control_continuous.py
def getPaths():
    global location_collect
    global location_send
    global location_calibrate
    global location_safety_node
    rospack = rospkg.RosPack()
    
    # Define location of launch files
    if sim_enabled == True:
        location_collect = rospack.get_path('outdoor_waypoint_nav') + "/launch/include/collect_goals_sim.launch"
        location_send = rospack.get_path('outdoor_waypoint_nav') + "/launch/include/send_goals_continuous_sim.launch"
        location_calibrate = rospack.get_path('outdoor_waypoint_nav') + "/launch/include/heading_calibration_sim.launch"
        location_safety_node = rospack.get_path('outdoor_waypoint_nav') + "/launch/include/safety_node.launch"

    elif sim_enabled == False:
        location_collect = rospack.get_path('outdoor_waypoint_nav') + "/launch/include/collect_goals.launch"
        location_send = rospack.get_path('outdoor_waypoint_nav') + "/launch/include/send_goals_continuous.launch"
        location_calibrate = rospack.get_path('outdoor_waypoint_nav') + "/launch/include/heading_calibration.launch"
        location_safety_node = rospack.get_path('outdoor_waypoint_nav') + "/launch/include/safety_node.launch"

    else:
        print("ERROR: PLEASE SPECIFY SIM_ENABLED PARAMETER.")
github RightHandRobotics / reflex-ros-pkg / reflex / src / reflex / reflex_usb_hand.py
def _write_zero_point_data_to_file(self, filename, data):
    rospack = rospkg.RosPack()
    reflex_usb_path = rospack.get_path("reflex")
    yaml_path = "yaml"
    file_path = join(reflex_usb_path, yaml_path, filename)
    with open(file_path, "w") as outfile:
        outfile.write(yaml.dump(data))
github OpenPTrack / open_ptrack_v2 / recognition / scripts / face_detection_node.py
def cfg_callback(self, config, level):
    package_path = rospkg.RosPack().get_path('recognition')
    self.face_detector_path = package_path + config.face_detector_path  # the path to the face detector model file
    self.confidence_thresh = config.confidence_thresh  # the threshold for confidence of face detection
    self.roi_width = config.roi_width_  # the width of a face detection ROI in the world space [m]
    self.calc_roi_from_top = config.calc_roi_from_top  # if true, ROIs are calculated from the top positions of detected clusters
    self.head_offset_z_top = config.head_offset_z_top  # the distance between the top position of a human cluster and the center of the face [m]
    self.head_offset_z_centroid = config.head_offset_z_centroid  # the distance between the centroid of a human cluster and the center of the face [m]
    self.upscale_minsize = config.upscale_minsize  # the face detection ROI is upscaled so that its width gets larger than #upscale_minsize
    self.visualization = config.visualization  # if true, the visualization of the detection will be shown

    print('--- cfg_callback ---')
    print('confidence_thresh', config.confidence_thresh)
    print('roi_width', config.roi_width_)
    print('calc_roi_from_top', config.calc_roi_from_top)
    print('head_offset_z_top', config.head_offset_z_top)
    print('head_offset_z_centroid', config.head_offset_z_centroid)
    print('upscale_minsize', config.upscale_minsize)
github ymollard / APEX / ros / apex_playground / src / apex_playground / learning / translator.py
def __init__(self):
        self.rospack = RosPack()
        with open(join(self.rospack.get_path('apex_playground'), 'config', 'bounds.json')) as f:
            self.bounds = json.load(f)
        with open(join(self.rospack.get_path('apex_playground'), 'config', 'general.json')) as f:
            self.params = json.load(f)
        self.bounds_motors_min = np.array([float(bound[0]) for bound in self.bounds['motors']['positions']])
        self.bounds_motors_max = np.array([float(bound[1]) for bound in self.bounds['motors']['positions']])
        self.bounds_sensory_min = [d for space in ['hand', 'joystick_1', 'joystick_2', 'ergo', 'ball', 'light', 'sound', "hand_right", "base", "arena", "obj1", "obj2", "obj3", "rdm1", "rdm2"] for d in [float(bound[0])for bound in self.bounds['sensory'][space]]*10]
        self.bounds_sensory_min = np.array([float(self.bounds['sensory']['ergo'][0][0]), float(self.bounds['sensory']['ball'][0][0])] + self.bounds_sensory_min)
        self.bounds_sensory_max = [d for space in ['hand', 'joystick_1', 'joystick_2', 'ergo', 'ball', 'light', 'sound', "hand_right", "base", "arena", "obj1", "obj2", "obj3", "rdm1", "rdm2"] for d in [float(bound[1])for bound in self.bounds['sensory'][space]]*10]
        self.bounds_sensory_max = np.array([float(self.bounds['sensory']['ergo'][0][1]), float(self.bounds['sensory']['ball'][0][1])] + self.bounds_sensory_max)
        self.bounds_sensory_diff = self.bounds_sensory_max - self.bounds_sensory_min

        # DMP PARAMETERS
        self.n_dmps = 4
        self.n_bfs = 7
        self.timesteps = 30