How to use the geopandas.pd attribute in geopandas

To help you get started, we’ve selected a few geopandas examples based on popular ways geopandas.pd is used in public projects. Note that geopandas.pd is not a function of its own: it is the pandas module that geopandas imports internally, exposed as an attribute of the package, so gpd.pd.DataFrame, gpd.pd.to_datetime, and gpd.pd.concat are the familiar pandas callables.

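A minimal sketch of that equivalence (assuming only that geopandas and pandas are installed; in releases where the attribute is exposed, gpd.pd is simply the pandas module):

import geopandas as gpd
import pandas as pd

# gpd.pd and pandas are the same module object, so every pandas call
# in the snippets below (DataFrame, to_datetime, to_timedelta, concat)
# can be spelled either way.
assert gpd.pd is pd
assert gpd.pd.to_datetime('20180101') == pd.to_datetime('20180101')

Because this is an implementation detail rather than a documented API, importing pandas directly is the more explicit choice in new code.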

github scottyhq / dinosar / bin / prep_topsApp.py View on GitHub
import os
import requests
import geopandas as gpd
from lxml import html

def download_orbit(granuleName):
    '''
    Grab orbit files from ASF
    '''
    cwd = os.getcwd()
    os.chdir(os.environ['POEORB'])
    sat = granuleName[:3]
    date = granuleName[17:25]
    print('downloading orbit for {}, {}'.format(sat,date))

    url = 'https://s1qc.asf.alaska.edu/aux_poeorb'
    r = requests.get(url)
    webpage = html.fromstring(r.content)
    orbits = webpage.xpath('//a/@href')
    # keep only links for the requested satellite (S1A or S1B)
    df = gpd.pd.DataFrame(dict(orbit=orbits))
    dfSat = df[df.orbit.str.startswith(sat)].copy()  # .copy() avoids SettingWithCopyWarning below
    dayBefore = gpd.pd.to_datetime(date) - gpd.pd.to_timedelta(1, unit='d')
    dayBeforeStr = dayBefore.strftime('%Y%m%d')
    # orbit file names embed the validity start date at a fixed position
    dfSat['startTime'] = dfSat.orbit.str[42:50]
    match = dfSat.loc[dfSat.startTime == dayBeforeStr, 'orbit'].values[0]
    cmd = 'wget -q -nc {}/{}'.format(url,match) #-nc means no clobber
    print(cmd)
    os.system(cmd)
    os.chdir(cwd)
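A hedged usage sketch: the granule name below is hypothetical (made up for illustration), but the slices mirror the parsing above, where granuleName[:3] is the satellite and granuleName[17:25] the acquisition date; POEORB is assumed to point at an existing directory:

import os

# Hypothetical Sentinel-1 granule name (made up for illustration)
granule = 'S1A_IW_SLC__1SDV_20180101T170000_20180101T170027_020000_022000_ABCD'
print(granule[:3], granule[17:25])  # S1A 20180101

os.environ['POEORB'] = '/tmp/poeorb'  # assumed orbit-file directory; must exist
download_orbit(granule)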
github scottyhq / dinoSARaws / bin / prep_topsApp.py View on GitHub
def download_orbit(granuleName):
    '''
    Grab orbit files from ASF
    '''
    cwd = os.getcwd()
    try:
        os.chdir(os.environ['POEORB'])
        sat = granuleName[:3]
        date = granuleName[17:25]
        print('downloading orbit for {}, {}'.format(sat,date))

        url = 'https://s1qc.asf.alaska.edu/aux_poeorb'
        r = requests.get(url)
        webpage = html.fromstring(r.content)
        orbits = webpage.xpath('//a/@href')
        # keep only links for the requested satellite (S1A or S1B)
        df = gpd.pd.DataFrame(dict(orbit=orbits))
        dfSat = df[df.orbit.str.startswith(sat)].copy()  # .copy() avoids SettingWithCopyWarning below
        dayBefore = gpd.pd.to_datetime(date) - gpd.pd.to_timedelta(1, unit='d')
        dayBeforeStr = dayBefore.strftime('%Y%m%d')
        # orbit file names embed the validity start date at a fixed position
        dfSat['startTime'] = dfSat.orbit.str[42:50]
        match = dfSat.loc[dfSat.startTime == dayBeforeStr, 'orbit'].values[0]
        cmd = 'wget -q -nc {}/{}'.format(url,match) #-nc means no clobber
        print(cmd)
        os.system(cmd)
    except Exception as e:
        print('Trouble downloading POEORB... maybe scene is too recent?')
        print(e)
    os.chdir(cwd)  # NOTE: better to specify a download directory than to jump cwd around
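The only difference from the dinosar version above is the try/except wrapper: precise orbit (POEORB) files are published with a delay, so a recently acquired granule may have no matching file yet. The lookup with values[0] then raises an IndexError on the empty selection, and the wrapper turns that into a printed warning so processing can continue.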
github scottyhq / dinosar / bin / prep_topsApp.py View on GitHub
import geopandas as gpd

def load_inventory(vectorFile):
    '''
    load merged inventory. easy!
    '''
    gf = gpd.read_file(vectorFile)
    gf['timeStamp'] = gpd.pd.to_datetime(gf.sceneDate, format='%Y-%m-%d %H:%M:%S')
    gf['sceneDateString'] = gf.timeStamp.apply(lambda x: x.strftime('%Y-%m-%d'))
    gf['dateStamp'] = gpd.pd.to_datetime(gf.sceneDateString)
    gf['utc'] = gf.timeStamp.apply(lambda x: x.strftime('%H:%M:%S'))
    gf['orbitCode'] = gf.relativeOrbit.astype('category').cat.codes
    return gf
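A hedged usage sketch: 'inventory.geojson' is a hypothetical file name; the function only assumes a vector file readable by geopandas with sceneDate and relativeOrbit attribute columns:

gf = load_inventory('inventory.geojson')  # hypothetical file name
print(gf[['sceneDateString', 'utc', 'orbitCode']].head())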
github scottyhq / dinoSARaws / bin / prep_topsApp.py View on GitHub
import geopandas as gpd

def load_inventory(vectorFile):
    '''
    load merged inventory. easy!
    '''
    gf = gpd.read_file(vectorFile)
    gf['timeStamp'] = gpd.pd.to_datetime(gf.sceneDate, format='%Y-%m-%d %H:%M:%S')
    gf['sceneDateString'] = gf.timeStamp.apply(lambda x: x.strftime('%Y-%m-%d'))
    gf['dateStamp'] = gpd.pd.to_datetime(gf.sceneDateString)
    gf['utc'] = gf.timeStamp.apply(lambda x: x.strftime('%H:%M:%S'))
    gf['orbitCode'] = gf.relativeOrbit.astype('category').cat.codes
    return gf
github datadesk / census-map-downloader / census_map_downloader / geotypes / blockgroups.py View on GitHub
        # Loop through all the states and download the shapes
        path_list = []
        for state in us.STATES:
            logger.debug(f"Downloading {state}")
            shp_path = StateBlockGroupsDownloader2018(
                state.abbr,
                data_dir=self.data_dir
            ).run()
            path_list.append(shp_path)

        # Open all the shapes
        df_list = [gpd.read_file(p) for p in path_list]

        # Concatenate them together
        df = gpd.pd.concat(df_list)

        logger.debug(f"Writing file with {len(df)} blocks groups to {self.merged_path}")
        df.to_file(self.merged_path, index=False)
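What makes this work is that pandas.concat, reached here as gpd.pd.concat, returns a GeoDataFrame when its inputs are GeoDataFrames, so the merged result still supports to_file. A minimal self-contained sketch of the same pattern, with made-up points and assuming both frames share a CRS:

from shapely.geometry import Point
import geopandas as gpd

a = gpd.GeoDataFrame({'name': ['a']}, geometry=[Point(0, 0)], crs='EPSG:4326')
b = gpd.GeoDataFrame({'name': ['b']}, geometry=[Point(1, 1)], crs='EPSG:4326')

merged = gpd.pd.concat([a, b], ignore_index=True)
print(type(merged).__name__, len(merged))  # GeoDataFrame 2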