How to use siphon - 10 common examples

To help you get started, we’ve selected a few siphon examples based on popular ways the library is used in public projects.


Unidata / python-gallery / examples / Sounding_Plotter.py
    parser.add_argument('-d', '--date', help='Date and time to request data for in YYYYMMDDHH.'
                        ' Defaults to most recent 00/12 hour.', type=str)
    parser.add_argument('-g', '--gdrive', help='Google Drive upload path', type=str)
    parser.add_argument('-f', '--filename', help='Image filename', type=str)
    args = parser.parse_args()

    if args.date:
        request_time = datetime.strptime(args.date, '%Y%m%d%H')
    else:
        # Figure out the most recent sounding, 00 or 12. Subtracting two hours
        # helps ensure that we choose a time with data available.
        now = datetime.utcnow() - timedelta(hours=2)
        request_time = now.replace(hour=(now.hour // 12) * 12, minute=0, second=0)

    # Request the data and plot
    df = WyomingUpperAir.request_data(request_time, args.site)
    skewt = plot_skewt(df)

    # Add the timestamp for the data to the plot
    add_timestamp(skewt.ax, request_time, y=1.02, x=0, ha='left', fontsize='large')
    skewt.ax.set_title(args.site)

    if args.show:
        plt.show()
    else:
        fname = args.filename if args.filename else make_name(args.site, request_time)
        if args.gdrive:
            uploader = DriveUploader()
            with tempfile.NamedTemporaryFile(suffix='.png') as f:
                skewt.ax.figure.savefig(f.name)
                uploader.upload_to(f.name, posixpath.join(args.gdrive, fname))
        else:
Unidata / siphon / dev / _downloads / 94ea07e3752cae6a77f64f43a5455c2f / latest_request.py
"""
NDBC Latest Data Request
========================

This example shows how to use siphon's simplewebservice support to query the most recent
observations from all of the NDBC buoys at once.
"""

import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt

from siphon.simplewebservice.ndbc import NDBC

####################################################
# Get a pandas data frame of all of the observations
df = NDBC.latest_observations()
df.head()

####################################################
# In this case I'm going to drop buoys that do not have water temperature measurements.
df.dropna(subset=['water_temperature'], inplace=True)

####################################################
# Let's make a simple plot of the buoy positions and color by water temperature
proj = ccrs.LambertConformal(central_latitude=45., central_longitude=-100.,
                             standard_parallels=[30, 60])

fig = plt.figure(figsize=(17., 11.))
ax = plt.axes(projection=proj)
ax.coastlines('50m', edgecolor='black')
ax.add_feature(cfeature.OCEAN.with_scale('50m'))
ax.add_feature(cfeature.LAND.with_scale('50m'))
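The excerpt stops just before the plotting call. A minimal continuation sketch, assuming the longitude, latitude, and water_temperature columns present in the DataFrame returned by NDBC.latest_observations():

# Scatter the buoy positions, colored by water temperature; the data are
# plain lat/lon values, so transform from PlateCarree
ax.scatter(df['longitude'], df['latitude'], c=df['water_temperature'],
           transform=ccrs.PlateCarree())
plt.show()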
Unidata / siphon / siphon / radarserver.py
def _get_metadata(self):
        ds_cat = TDSCatalog(self.url_path('dataset.xml'))
        self.metadata = ds_cat.metadata
        self.variables = {k.split('/')[0] for k in self.metadata['variables']}
        self._get_stations()
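_get_metadata runs when a RadarServer instance is constructed. A hedged usage sketch (the endpoint URL is an assumption, pointing at Unidata's public NEXRAD Level II server):

from datetime import datetime

from siphon.radarserver import RadarServer

# The endpoint is an assumption; any THREDDS radarServer URL works here
rs = RadarServer('https://thredds.ucar.edu/thredds/radarServer/nexrad/level2/IDD/')
print(sorted(rs.variables))  # populated by _get_metadata() above
query = rs.query().stations('KTLX').time(datetime.utcnow())
cat = rs.get_catalog(query)  # catalog of matching Level II files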
pvlib / pvlib-python / pvlib / forecast.py
        '''
        Retrieves the designated dataset, creates an NCSS object, and
        creates an NCSS query object.
        '''

        keys = list(self.model.datasets.keys())
        labels = [item.split()[0].lower() for item in keys]
        if self.set_type == 'best':
            self.dataset = self.model.datasets[keys[labels.index('best')]]
        elif self.set_type == 'latest':
            self.dataset = self.model.datasets[keys[labels.index('latest')]]
        elif self.set_type == 'full':
            self.dataset = self.model.datasets[keys[labels.index('full')]]

        self.access_url = self.dataset.access_urls[self.access_url_key]
        self.ncss = NCSS(self.access_url)
        self.query = self.ncss.query()
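This method is internal to pvlib's forecast models. A sketch of how it gets exercised, assuming an older pvlib release that still ships the (since-removed) forecast module; the location and time window are arbitrary:

from datetime import datetime

import pandas as pd
from pvlib.forecast import GFS

model = GFS()  # triggers the dataset lookup and NCSS/query setup shown above
start = pd.Timestamp(datetime.utcnow(), tz='UTC')
end = start + pd.Timedelta(days=1)
data = model.get_data(40.0, -105.0, start, end)  # lat/lon are arbitrary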
Unidata / python-gallery / examples / 850hPa_Temperature_Advection.py
# Helper function for finding proper time variable
def find_time_var(var, time_basename='time'):
    for coord_name in var.coordinates.split():
        if coord_name.startswith(time_basename):
            return coord_name
    raise ValueError('No time variable found for ' + var.name)


###############################################
# Create NCSS object to access the NetcdfSubset
# ---------------------------------------------
# Data from NCEI GFS 0.5 deg Analysis Archive

base_url = 'https://www.ncei.noaa.gov/thredds/ncss/grid/gfs-g4-anl-files/'
dt = datetime(2017, 4, 5, 12)
ncss = NCSS('{}{dt:%Y%m}/{dt:%Y%m%d}/gfsanl_4_{dt:%Y%m%d}_'
            '{dt:%H}00_000.grb2'.format(base_url, dt=dt))

# Create lat/lon box for location you want to get data for
query = ncss.query().time(dt)
query.lonlat_box(north=65, south=15, east=310, west=220)
query.accept('netcdf')

# Request data for geopotential height, temperature, and winds
query.variables('Geopotential_height_isobaric', 'Temperature_isobaric',
                'u-component_of_wind_isobaric', 'v-component_of_wind_isobaric')
data = ncss.get_data(query)

# Pull out variables you want to use
hght_var = data.variables['Geopotential_height_isobaric']
temp_var = data.variables['Temperature_isobaric']
u_wind_var = data.variables['u-component_of_wind_isobaric']
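The excerpt ends before the helper defined above is put to use. In the full gallery script the pattern is roughly this (a sketch, assuming netCDF4 for decoding the time coordinate):

from netCDF4 import num2date

# Locate the time coordinate attached to the temperature variable and
# decode it into a datetime
time_var = data.variables[find_time_var(temp_var)]
vtime = num2date(time_var[:].squeeze(), units=time_var.units)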
Unidata / siphon / siphon / _version.py
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
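For reference, the keywords dict is produced by git-archive keyword substitution; once expanded it looks roughly like this (the values are illustrative, not real output):

keywords = {
    'refnames': ' (HEAD -> master, tag: v0.9.0)',  # expanded $Format:%d$
    'full': '<full commit hash>',                  # expanded $Format:%H$
    'date': '2019-01-02 10:30:00 -0600',           # expanded $Format:%ci$
}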
Unidata / siphon / siphon / catalog.py
def resolve_url(self, catalog_url):
        """Resolve the url of the dataset when reading latest.xml.

        Parameters
        ----------
        catalog_url : str
            The catalog url to be resolved

        """
        if catalog_url != '':
            resolver_base = catalog_url.split('catalog.xml')[0]
            resolver_url = resolver_base + self.url_path
            resolver_xml = session_manager.urlopen(resolver_url)
            tree = ET.parse(resolver_xml)
            root = tree.getroot()
            if 'name' in root.attrib:
                self.catalog_name = root.attrib['name']
            else:
                self.catalog_name = 'No name found'
            resolved_url = ''
            found = False
            for child in root.iter():
                if not found:
                    tag_type = child.tag.split('}')[-1]
                    if tag_type == 'dataset':
                        if 'urlPath' in child.attrib:
                            ds = Dataset(child)
                            resolved_url = ds.url_path
                            found = True
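resolve_url is invoked internally when a catalog's latest.xml reference is read. siphon also exposes a convenience function for the same task, sketched here (the THREDDS catalog URL is an assumption):

from siphon.catalog import get_latest_access_url

# Resolve the newest dataset behind latest.xml and return one of its access URLs
url = get_latest_access_url('https://thredds.ucar.edu/thredds/catalog/'
                            'grib/NCEP/GFS/Global_0p5deg/catalog.xml',
                            'OPENDAP')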
Unidata / siphon / siphon / simplewebservice / wyoming.py
        site_id : str
            The three letter ICAO identifier of the station for which data should be
            downloaded.

        Returns
        -------
            :class:`pandas.DataFrame` containing the data

        """
        raw_data = self._get_data_raw(time, site_id)
        soup = BeautifulSoup(raw_data, 'html.parser')
        tabular_data = StringIO(soup.find_all('pre')[0].contents[0])
        col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
        df = pd.read_fwf(tabular_data, skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
        df['u_wind'], df['v_wind'] = get_wind_components(df['speed'],
                                                         np.deg2rad(df['direction']))

        # Drop any rows with all NaN values for T, Td, winds
        df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
                               'u_wind', 'v_wind'), how='all').reset_index(drop=True)

        # Parse metadata
        meta_data = soup.find_all('pre')[1].contents[0]
        lines = meta_data.splitlines()

        # If the station doesn't have a name identified we need to insert a
        # record showing this for parsing to proceed.
        if 'Station number' in lines[1]:
            lines.insert(1, 'Station identifier: ')

        station = lines[1].split(':')[1].strip()
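A minimal call to the method documented above (the station and date are arbitrary examples):

from datetime import datetime

from siphon.simplewebservice.wyoming import WyomingUpperAir

# Request the 12 UTC sounding for Denver (DNR)
df = WyomingUpperAir.request_data(datetime(2017, 9, 10, 12), 'DNR')
print(df[['pressure', 'temperature', 'dewpoint', 'u_wind', 'v_wind']].head())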
Unidata / siphon / siphon / catalog.py
            resolver_url = resolver_base + self.url_path
            resolver_xml = session_manager.urlopen(resolver_url)
            tree = ET.parse(resolver_xml)
            root = tree.getroot()
            if 'name' in root.attrib:
                self.catalog_name = root.attrib['name']
            else:
                self.catalog_name = 'No name found'
            resolved_url = ''
            found = False
            for child in root.iter():
                if not found:
                    tag_type = child.tag.split('}')[-1]
                    if tag_type == 'dataset':
                        if 'urlPath' in child.attrib:
                            ds = Dataset(child)
                            resolved_url = ds.url_path
                            found = True
            if found:
                return resolved_url
            else:
                log.warning('no dataset url path found in latest.xml!')
eWaterCycle / jupyterlab_thredds / jupyterlab_thredds / crawler.py
"""Same as siphon.catalog.TDSCatalog, but with constructor which takes url and response

        So it can be used by a generic asynchronous web crawler

        Args:
            catalog_url: URL with THREDDS catalog xml file
            content: Content found at catalog_url
        """
        self.catalog_url = catalog_url
        self.base_tds_url = _find_base_tds_url(self.catalog_url)

        # begin parsing the xml doc
        root = ET.fromstring(content)
        self.catalog_name = root.attrib.get('name', 'No name found')

        self.datasets = DatasetCollection()
        self.services = []
        self.catalog_refs = DatasetCollection()
        self.metadata = {}
        self.ds_with_access_elements_to_process = []
        service_skip_count = 0
        service_skip = 0
        current_dataset = None
        previous_dataset = None
        for child in root.iter():
            tag_type = child.tag.split('}')[-1]
            if tag_type == 'dataset':
                current_dataset = child.attrib['name']
                self._process_dataset(child)

                # see if the previously processed dataset has access elements as children
                # if so, these datasets need to be processed specially when making