How to use the airsim.client.ImageType class in airsim

To help you get started, we’ve selected a few airsim examples based on popular ways it is used in public projects.

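All of the examples below follow the same basic pattern: build one or more airsim.ImageRequest objects with an ImageType value, pass them to simGetImages, and decode the returned byte buffer with NumPy. As a point of reference, here is a minimal, self-contained sketch of that pattern; the MultirotorClient connection and camera name "0" are illustrative assumptions, not part of the projects below.

import airsim
import numpy as np

# Minimal sketch: connect, request one uncompressed Scene (RGB) image, decode it.
client = airsim.MultirotorClient()  # use airsim.CarClient() for the car simulation
client.confirmConnection()

responses = client.simGetImages([
    airsim.ImageRequest("0", airsim.ImageType.Scene, pixels_as_float=False, compress=False)
])
response = responses[0]

# Decode the flat uint8 buffer into an H x W x C array. The channel count
# depends on the AirSim version (older builds return RGBA, newer ones a
# 3-channel image), so derive it from the buffer length rather than hard-coding it.
img1d = np.frombuffer(response.image_data_uint8, dtype=np.uint8)
channels = img1d.size // (response.height * response.width)
img = img1d.reshape(response.height, response.width, channels)
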

github nesl / UnrealAirSimDRL / Deep_Reinforcement_Learning / Library / ClientAirSimEnvironments / AutoQuadcoptersUnrealEnvironment.py View on Github external
def get_new_images(self, vehicleName):
        # Wait to grab images
        tic = time.time()
        print("Collecting Images from AirSim")
        if (self.image_mask_FC_FR_FL[0] and  self.image_mask_FC_FR_FL[1] and  self.image_mask_FC_FR_FL[2]): 
            images = self.client.simGetImages([
            client.ImageRequest("0", client.ImageType.Scene, False, False), # Front Center
            client.ImageRequest("1", client.ImageType.Scene, False, False), # Front Right
            client.ImageRequest("2", client.ImageType.Scene, False, False)], vehicle_name = vehicleName) # Front Left
            img1d_FC = np.fromstring(images[0].image_data_uint8, dtype=np.uint8) 
            img_rgba_FC = np.array(img1d_FC.reshape(images[0].height, images[0].width, 4), dtype = np.uint8)
            img_rgb_FC = img_rgba_FC[:,:,0:3]
            
            img1d_FR = np.fromstring(images[1].image_data_uint8, dtype=np.uint8) 
            img_rgba_FR = np.array(img1d_FR.reshape(images[1].height, images[1].width, 4), dtype = np.uint8)
            img_rgb_FR = img_rgba_FR[:,:,0:3]
            
            img1d_FL = np.fromstring(images[2].image_data_uint8, dtype=np.uint8) 
            img_rgba_FL = np.array(img1d_FL.reshape(images[2].height, images[2].width, 4), dtype = np.uint8)
            img_rgb_FL = img_rgba_FL[:,:,0:3]
            
            # Can either use the RGBA images or the RGB Images
            
            self.images_rgba[vehicleName] = np.dstack((img_rgba_FC,img_rgba_FR,img_rgba_FL))
github nesl / UnrealAirSimDRL / Deep_Reinforcement_Learning / Library / ClientAirSimEnvironments / AutoQuadcopterUnrealEnvironment.py View on Github external
def get_new_images(self):
        # Wait to grab images
        tic = time.time()
        if (self.image_mask_FC_FR_FL[0] and  self.image_mask_FC_FR_FL[1] and  self.image_mask_FC_FR_FL[2]): 

            images = self.client.simGetImages([
            client.ImageRequest("0", client.ImageType.Scene, False, False), # Front Center
            client.ImageRequest("1", client.ImageType.Scene, False, False), # Front Right
            client.ImageRequest("2", client.ImageType.Scene, False, False)]) # Front Left
            img1d_FC = np.fromstring(images[0].image_data_uint8, dtype=np.uint8) 
            img_rgba_FC = np.array(img1d_FC.reshape(images[0].height, images[0].width, 4), dtype = np.uint8)
            img_rgb_FC = img_rgba_FC[:,:,0:3]
            
            img1d_FR = np.fromstring(images[1].image_data_uint8, dtype=np.uint8) 
            img_rgba_FR = np.array(img1d_FR.reshape(images[1].height, images[1].width, 4), dtype = np.uint8)
            img_rgb_FR = img_rgba_FR[:,:,0:3]
            
            img1d_FL = np.fromstring(images[2].image_data_uint8, dtype=np.uint8) 
            img_rgba_FL = np.array(img1d_FL.reshape(images[2].height, images[2].width, 4), dtype = np.uint8)
            img_rgb_FL = img_rgba_FL[:,:,0:3]
            
            # Can either use the RGBA images or the RGB Images
            
            self.images_rgba = np.dstack((img_rgba_FC,img_rgba_FR,img_rgba_FL))
            self.images_rgb = np.dstack((img_rgb_FC,img_rgb_FR,img_rgb_FL))
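
The snippet above batches three camera requests into a single simGetImages call. The same batching also works across image types; the sketch below is a hedged illustration of that (the camera name, the DepthVis/Segmentation choices, and the "Drone1" vehicle name are assumptions, not taken from the project above).

import airsim

client = airsim.MultirotorClient()
client.confirmConnection()

# One round trip can mix cameras and image types; vehicle_name is optional
# and only matters when several vehicles are defined in settings.json.
responses = client.simGetImages([
    airsim.ImageRequest("0", airsim.ImageType.Scene, False, False),         # RGB scene
    airsim.ImageRequest("0", airsim.ImageType.DepthVis, False, False),      # depth visualization
    airsim.ImageRequest("0", airsim.ImageType.Segmentation, False, False),  # segmentation mask
], vehicle_name="Drone1")

for r in responses:
    print(r.image_type, r.width, r.height)
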
github nesl / UnrealAirSimDRL / Deep_Reinforcement_Learning / Library / ClientAirSimEnvironments / AutoQuadcoptersUnrealEnvironment.py View on Github external
def get_new_images(self, vehicleName):
        # Wait to grab images
        tic = time.time()
        print("Collecting Images from AirSim")
        if (self.image_mask_FC_FR_FL[0] and  self.image_mask_FC_FR_FL[1] and  self.image_mask_FC_FR_FL[2]): 
            images = self.client.simGetImages([
            client.ImageRequest("0", client.ImageType.Scene, False, False), # Front Center
            client.ImageRequest("1", client.ImageType.Scene, False, False), # Front Right
            client.ImageRequest("2", client.ImageType.Scene, False, False)], vehicle_name = vehicleName) # Front Left
            img1d_FC = np.fromstring(images[0].image_data_uint8, dtype=np.uint8) 
            img_rgba_FC = np.array(img1d_FC.reshape(images[0].height, images[0].width, 4), dtype = np.uint8)
            img_rgb_FC = img_rgba_FC[:,:,0:3]
            
            img1d_FR = np.fromstring(images[1].image_data_uint8, dtype=np.uint8) 
            img_rgba_FR = np.array(img1d_FR.reshape(images[1].height, images[1].width, 4), dtype = np.uint8)
            img_rgb_FR = img_rgba_FR[:,:,0:3]
            
            img1d_FL = np.fromstring(images[2].image_data_uint8, dtype=np.uint8) 
            img_rgba_FL = np.array(img1d_FL.reshape(images[2].height, images[2].width, 4), dtype = np.uint8)
            img_rgb_FL = img_rgba_FL[:,:,0:3]
            
            # Can either use the RGBA images or the RGB Images
github nesl / UnrealAirSimDRL / Deep_Reinforcement_Learning / Library / ClientAirSimEnvironments / ManualQuadcopterUnrealEnvironment.py View on Github external
def get_new_images(self):
        # Construct the Images State Vector
        # Order is Front Center, Front Right, Front Left
        tic = time.time()
        self.client.simPause(True)
        if (self.image_mask_FC_FR_FL[0] and  self.image_mask_FC_FR_FL[1] and  self.image_mask_FC_FR_FL[2]): 
            images = self.client.simGetImages([client.ImageRequest("0", client.ImageType.Scene, False, False), # Front Center
            client.ImageRequest("1", client.ImageType.Scene, False, False), # Front Right
            client.ImageRequest("2", client.ImageType.Scene, False, False)]) # Front Left
            img1d_FC = np.fromstring(images[0].image_data_uint8, dtype=np.uint8) 
            img_rgba_FC = np.array(img1d_FC.reshape(images[0].height, images[0].width, 4), dtype = np.uint8)
            img_rgb_FC = img_rgba_FC[:,:,0:3]
            
            img1d_FR = np.fromstring(images[1].image_data_uint8, dtype=np.uint8) 
            img_rgba_FR = np.array(img1d_FR.reshape(images[1].height, images[1].width, 4), dtype = np.uint8)
            img_rgb_FR = img_rgba_FR[:,:,0:3]
            
            img1d_FL = np.fromstring(images[2].image_data_uint8, dtype=np.uint8) 
            img_rgba_FL = np.array(img1d_FL.reshape(images[2].height, images[2].width, 4), dtype = np.uint8)
            img_rgb_FL = img_rgba_FL[:,:,0:3]
            
            # Can either use the RGBA images or the RGB Images
            self.images_rgba = np.dstack((img_rgba_FC,img_rgba_FR,img_rgba_FL))
            self.images_rgb = np.dstack((img_rgb_FC,img_rgb_FR,img_rgb_FL))
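
The example above pauses the simulation before capturing so that all three cameras see the same instant, and the final example on this page resumes it afterwards with simPause(False). A minimal sketch of that pause/capture/resume pattern, assuming `client` is an already-connected airsim client:

# Pause the simulation while grabbing images so every camera sees the same frame,
# then resume even if the request fails.
client.simPause(True)
try:
    responses = client.simGetImages([
        airsim.ImageRequest("0", airsim.ImageType.Scene, False, False),  # Front Center
        airsim.ImageRequest("1", airsim.ImageType.Scene, False, False),  # Front Right
        airsim.ImageRequest("2", airsim.ImageType.Scene, False, False),  # Front Left
    ])
finally:
    client.simPause(False)
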
github nesl / UnrealAirSimDRL / Deep_Reinforcement_Learning / Library / ClientAirSimEnvironments / AutoCarUnrealEnvironment.py View on Github external
def get_new_images(self):
        # Construct the Images State Vector
        # Order is Front Center, Front Right, Front Left
        tic = time.time()
        self.client.simPause(True)
        if (self.image_mask_FC_FR_FL[0] and  self.image_mask_FC_FR_FL[1] and  self.image_mask_FC_FR_FL[2]): 
            images = self.client.simGetImages([client.ImageRequest("0", client.ImageType.Scene, False, False), # Front Center
            client.ImageRequest("1", client.ImageType.Scene, False, False), # Front Right
            client.ImageRequest("2", client.ImageType.Scene, False, False)]) # Front Left
            img1d_FC = np.fromstring(images[0].image_data_uint8, dtype=np.uint8) 
            img_rgba_FC = np.array(img1d_FC.reshape(images[0].height, images[0].width, 4), dtype = np.uint8)
            img_rgb_FC = img_rgba_FC[:,:,0:3]
            
            img1d_FR = np.fromstring(images[1].image_data_uint8, dtype=np.uint8) 
            img_rgba_FR = np.array(img1d_FR.reshape(images[1].height, images[1].width, 4), dtype = np.uint8)
            img_rgb_FR = img_rgba_FR[:,:,0:3]
            
            #plt.imshow(img_rgb_FR)
            #plt.show()
            #time.sleep(2)
            
            img1d_FL = np.fromstring(images[2].image_data_uint8, dtype=np.uint8) 
            img_rgba_FL = np.array(img1d_FL.reshape(images[2].height, images[2].width, 4), dtype = np.uint8)
github nesl / UnrealAirSimDRL / Deep_Reinforcement_Learning / Library / ClientAirSimEnvironments / AutoCarsUnrealEnvironment.py View on Github external
img1d_FL = np.fromstring(images[2].image_data_uint8, dtype=np.uint8) 
                img_rgba_FL = np.array(img1d_FL.reshape(images[2].height, images[2].width, 4), dtype = np.uint8)
                img_rgb_FL = img_rgba_FL[:,:,0:3]
                
                #plt.imshow(img_rgb_FL)
                #plt.show()
                #time.sleep(2)
                
                # Can either use the RGBA images or the RGB Images
                self.images_rgba[vn] = np.dstack((img_rgba_FC,img_rgba_FR,img_rgba_FL))
                self.images_rgb[vn] = np.dstack((img_rgb_FC,img_rgb_FR,img_rgb_FL))
                print("Time to Grab All Images: ", time.time() - self.toc)
                
            # We Just want front view      
            elif (self.image_mask_FC_FR_FL[0] and not self.image_mask_FC_FR_FL[1] and not self.image_mask_FC_FR_FL[2]): 
                images = self.client.simGetImages([client.ImageRequest("0", client.ImageType.Scene, False, False)], vehicle_name = vn) # Front Center
                img1d_FC = np.fromstring(images[0].image_data_uint8, dtype=np.uint8) 
                img_rgba_FC = np.array(img1d_FC.reshape(images[0].height, images[0].width, 4), dtype = np.uint8)
                img_rgb_FC = img_rgba_FC[:,:,0:3]
                
                self.images_rgba[vn] = img_rgba_FC
                self.images_rgb[vn] = img_rgb_FC
                print("Time to Grab Images: ", time.time() - self.toc)
            
        
        self.client.simPause(False)
        # Store X and Y position information as well
        self.extra_metadata = self.dt
        # Start the timer again as the next state is saved. Return the current state and meta-data 
        self.tic = time.time()
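
One note on the decoding step used throughout these excerpts: np.fromstring is deprecated for binary input in current NumPy releases, and np.frombuffer is the drop-in replacement. A sketch of the equivalent decoding, assuming `response` is one entry returned by simGetImages:

# Same decoding as the examples above, using the non-deprecated NumPy call.
# Reshaping to 4 channels matches the older AirSim builds these projects target;
# newer builds return 3-channel images, so adjust the channel count accordingly.
img1d = np.frombuffer(response.image_data_uint8, dtype=np.uint8)
img_rgba = img1d.reshape(response.height, response.width, 4)
img_rgb = img_rgba[:, :, 0:3]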