How to use the pybullet.stepSimulation function in pybullet

To help you get started, we’ve selected a few pybullet.stepSimulation examples based on popular ways it is used in public projects.

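Before the project snippets, here is a minimal, self-contained sketch of the typical stepSimulation loop. By default each call to pybullet.stepSimulation advances the physics by one fixed time step of 1/240 s; the bodies and step count below are illustrative.

import pybullet as p
import pybullet_data

# Headless physics server; use p.GUI to visualize instead.
client = p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())  # lets loadURDF find plane.urdf
p.setGravity(0, 0, -10)
plane_id = p.loadURDF("plane.urdf")
cube_id = p.loadURDF("cube.urdf", basePosition=[0, 0, 1])

# Advance the simulation one fixed time step (1/240 s by default) per call.
for _ in range(240):
    p.stepSimulation()

print(p.getBasePositionAndOrientation(cube_id))  # the cube has fallen onto the plane
p.disconnect(client)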

github BlackToppStudios / Mezzanine / Mezzanine / libincludes / common / bulletsrc / bulletsvnsrc / examples / pybullet / examples / reset_dynamic_info.py
import time

import pybullet as p
import pybullet_data

# Connect and load a plane plus a falling cube (setup lines added so the
# snippet runs standalone).
p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
planeId = p.loadURDF("plane.urdf")
cubeId = p.loadURDF(fileName="cube.urdf", baseOrientation=[0, 0, 0, 1], basePosition=[0, 0, 4])
#p.changeDynamics(bodyUniqueId=cubeId, linkIndex=-1, mass=0.1)
p.changeDynamics(bodyUniqueId=cubeId, linkIndex=-1, mass=100.0)
p.setGravity(0, 0, -10)
p.setRealTimeSimulation(0)
t = 0
while True:
    t = t + 1
    if t > 400:
        # Make the plane slippery after 400 steps.
        p.changeDynamics(bodyUniqueId=planeId, linkIndex=-1, lateralFriction=0.01)
    # getDynamicsInfo returns a tuple; mass and lateral friction are its first two fields.
    mass1, frictionCoeff1 = p.getDynamicsInfo(bodyUniqueId=planeId, linkIndex=-1)[:2]
    mass2, frictionCoeff2 = p.getDynamicsInfo(bodyUniqueId=cubeId, linkIndex=-1)[:2]
    print(mass1, frictionCoeff1)
    print(mass2, frictionCoeff2)
    time.sleep(.01)
    p.stepSimulation()
github araffin / robotics-rl-srl / environments / KukaObjects.py
        # Randomize positions of each object URDF.
        objectUids = []
        for urdf_name in urdfList:
            xpos = 0.4 + self._blockRandom * random.random()
            ypos = self._blockRandom * (random.random() - .5)
            angle = np.pi / 2 + self._blockRandom * np.pi * random.random()
            orn = p.getQuaternionFromEuler([0, 0, angle])
            urdf_path = os.path.join(self._urdfRoot, urdf_name)
            uid = p.loadURDF(urdf_path, [xpos, ypos, .15],
                             [orn[0], orn[1], orn[2], orn[3]])
            objectUids.append(uid)
            # Let each object fall into the tray individually, to prevent
            # objects from intersecting.
            for _ in range(500):
                p.stepSimulation()
        return objectUids
github araffin / robotics-rl-srl / environments / KukaObjects.py
        self._attempted_grasp = False
        self._env_step = 0
        self.terminated = 0

        p.resetSimulation()
        p.setPhysicsEngineParameter(numSolverIterations=150)
        p.setTimeStep(self._timeStep)
        p.loadURDF(os.path.join(self._urdfRoot, "plane.urdf"), [0, 0, -1])

        p.loadURDF(os.path.join(self._urdfRoot, "table/table.urdf"),
                   [0.5, 0.0, -0.82], [0.0, 0.0, 0.0, 1.0])

        p.setGravity(0, 0, -10)
        self._kuka = kuka.Kuka(urdfRootPath=self._urdfRoot, timeStep=self._timeStep)
        self._envStepCounter = 0
        p.stepSimulation()

        # Choose the objects in the bin.
        urdfList = self._get_random_object(
            self._numObjects, self._isTest)
        self._objectUids = self._randomly_place_objects(urdfList)
        self._observation = self._get_observation()
        return np.array(self._observation)
github benelot / bullet-gym / bullet-gym-primitive / envs / MJCFReacherv0Env.py
        assert np.isfinite(action).all()

        if self.done:
            print("Why is step called when the episode is done?", file=sys.stderr)
            return np.copy(self.state), 0, True, {}
        # choose your next action
        # do some out of bounds checks to reset the environment and agent
        # calculate the reward for your agent

        info = {}

        # step simulation forward. at the end of each repeat we set part of the step's
        # state by capturing parts' states in some form.
        for r in range(self.repeats):
            for _ in range(self.steps_per_repeat):
                p.stepSimulation()
                self.central_joint.set_torque(0.07 * float(np.clip(action[0], -1, +1)))
                self.elbow_joint.set_torque(0.07 * float(np.clip(action[1], -1, +1)))
                if self.delay > 0:
                    time.sleep(self.delay)
            self.set_state_element_for_repeat(r)
        self.steps += 1

        # check for end of episode (by length)
        if self.steps >= self.max_episode_len:
            info['done_reason'] = 'episode length'
            self.done = True
        
        self.to_target_vec = np.array(self.fingertip.current_position()) - np.array(self.target.current_position())
        reward_dist = - np.linalg.norm(self.to_target_vec)
        reward_ctrl = - np.square(action).sum()
        reward_rotation = - np.square(self.theta_dot*self.dt) - np.square(self.gamma_dot*self.dt)
github Healthcare-Robotics / assistive-gym / assistive_gym / envs / drinking.py
        batch_positions = []
        for i in range(4):
            for j in range(4):
                for k in range(4):
                    batch_positions.append(np.array([i*2*water_radius-0.02, j*2*water_radius-0.02, k*2*water_radius+0.075]) + cup_pos)
        self.waters = self.create_spheres(radius=water_radius, mass=water_mass, batch_positions=batch_positions, visual=False, collision=True)
        for w in self.waters:
            p.changeVisualShape(w.body, -1, rgbaColor=[0.25, 0.5, 1, 1], physicsClientId=self.id)
        self.total_water_count = len(self.waters)

        # Enable rendering
        p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1, physicsClientId=self.id)

        # Drop water in the cup
        for _ in range(50):
            p.stepSimulation(physicsClientId=self.id)

        self.init_env_variables()
        return self._get_obs()
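The drinking.py example above passes physicsClientId to every call, which is how pybullet routes commands when more than one physics server is connected. A minimal sketch of stepping two independent servers (the client handles are illustrative):

import pybullet as p

# Two independent physics servers; calls without physicsClientId default to
# client 0 (the first server), so each call here names its target explicitly.
client_a = p.connect(p.DIRECT)
client_b = p.connect(p.DIRECT)
p.setGravity(0, 0, -10, physicsClientId=client_a)
p.setGravity(0, 0, -10, physicsClientId=client_b)

for _ in range(100):
    p.stepSimulation(physicsClientId=client_a)
    p.stepSimulation(physicsClientId=client_b)

p.disconnect(client_a)
p.disconnect(client_b)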
github google-research / google-research / dql_grasping / grasping_env.py
    self._env_step = 0

    for i in range(len(self._urdf_list)):
      xpos = 0.4 + self._block_random * random.random()
      ypos = self._block_random * (random.random() - .5)
      # random angle
      angle = np.pi / 2 + self._block_random * np.pi * random.random()
      ori = pybullet.getQuaternionFromEuler([0, 0, angle])
      pybullet.resetBasePositionAndOrientation(
          self._block_uids[i], [xpos, ypos, .15],
          [ori[0], ori[1], ori[2], ori[3]],
          physicsClientId=self.cid)
      # Let each object fall into the tray individually, to prevent
      # objects from intersecting.
      for _ in range(500):
        pybullet.stepSimulation(physicsClientId=self.cid)

    # Let the blocks settle and move arm down into a closer approach pose.
    self._kuka.reset()
    # note the velocity continues throughout the grasp.
    self._kuka.applyAction([0, 0, -0.3, 0, 0.3])
    for i in range(100):
      pybullet.stepSimulation(physicsClientId=self.cid)
    return self._get_observation()
github araffin / robotics-rl-srl / environments / mobile_robot / mobile_robot_env.py
        wall_right = p.loadURDF(wall_urdf, [self._max_x / 2, self._max_y, 0], useFixedBase=True)
        p.changeVisualShape(wall_right, -1, rgbaColor=green)

        wall_top = p.loadURDF(wall_urdf, [self._min_x, self._max_y / 2, 0],
                              p.getQuaternionFromEuler([0, 0, np.pi / 2]), useFixedBase=True)
        p.changeVisualShape(wall_top, -1, rgbaColor=blue)

        self.walls = [wall_left, wall_bottom, wall_right, wall_top]

        # Add mobile robot
        self.robot_uid = p.loadURDF(os.path.join(self._urdf_root, "racecar/racecar.urdf"), self.robot_pos,
                                    useFixedBase=True)

        self._env_step_counter = 0
        for _ in range(50):
            p.stepSimulation()

        self._observation = self.getObservation()

        if self.saver is not None:
            self.saver.reset(self._observation, self.getTargetPos(), self.getGroundTruth())

        if self.srl_model != "raw_pixels":
            return self.getSRLState(self._observation)

        return np.array(self._observation)
github jhu-lcsr / costar_plan / costar_task_plan / python / costar_task_plan / simulation / world.py
    def _update_environment(self):
        '''
        Step the simulation forward after all actors have given their commands
        to associated simulated robots. Then update all actors' states.
        '''

        # Loop through the given number of steps
        for _ in range(self.num_steps):
            pb.stepSimulation()

        # Update the states of all actors.
        for actor in self.actors:
            actor.state = actor.getState()
            actor.state.t = self.ticks * self.dt
github google-research / google-research / dql_grasping / grasping_env.py
      if self._renders:
        time.sleep(self._time_step)
      if self._termination():
        break

    # If we are close to the bin, attempt grasp.
    state = pybullet.getLinkState(self._kuka.kukaUid,
                                  self._kuka.kukaEndEffectorIndex,
                                  physicsClientId=self.cid)
    end_effector_pos = state[0]
    if end_effector_pos[2] <= 0.1:
      finger_angle = 0.3
      for _ in range(1000):
        grasp_action = [0, 0, 0.001, 0, finger_angle]
        self._kuka.applyAction(grasp_action)
        pybullet.stepSimulation(physicsClientId=self.cid)
        finger_angle -= 0.3/100.
        if finger_angle < 0:
          finger_angle = 0
      self._attempted_grasp = True
    observation = self._get_observation()
    done = self._termination()
    reward = self._reward()

    debug = {
        'grasp_success': self._grasp_success
    }
    return observation, reward, done, debug