How to use the pandas.DatetimeIndex class in pandas

To help you get started, we’ve selected a few real-world pandas.DatetimeIndex examples, based on popular ways it is used in public projects.

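As a quick orientation before the project snippets, here is a minimal, self-contained sketch of the common ways to build a DatetimeIndex (all dates are illustrative):

import pandas as pd

# from a list of date strings, parsed automatically
idx = pd.DatetimeIndex(["2018-05-01", "2018-05-02", "2018-05-03"])

# from an existing array or Series of timestamps
stamps = pd.to_datetime(["2018-05-01 09:30", "2018-05-01 15:30"])
idx2 = pd.DatetimeIndex(stamps)

# for regular ranges, pd.date_range returns a DatetimeIndex directly
idx3 = pd.date_range(start="2018-05-01", periods=3, freq="D")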

github quantrocket-llc / moonshot / tests / test_benchmark.py View on GitHub
def mock_get_prices(*args, **kwargs):

            dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03"])
            fields = ["Close","Open"]
            times = ["09:30:00", "15:30:00"]
            idx = pd.MultiIndex.from_product(
                [fields, dt_idx, times], names=["Field", "Date", "Time"])

            prices = pd.DataFrame(
                {
                    "FI12345": [
                        # Close
                        9.6,
                        10.45,
                        10.12,
                        15.45,
                        8.67,
                        12.30,
                        # Open
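The snippet above is cut off partway through the Open prices. For reference, a complete, runnable sketch of the same pattern looks like the following; the Open values are hypothetical placeholders, not the ones elided from the original test:

import pandas as pd

dt_idx = pd.DatetimeIndex(["2018-05-01", "2018-05-02", "2018-05-03"])
fields = ["Close", "Open"]
times = ["09:30:00", "15:30:00"]
idx = pd.MultiIndex.from_product(
    [fields, dt_idx, times], names=["Field", "Date", "Time"])

# 2 fields x 3 dates x 2 times = 12 rows
prices = pd.DataFrame(
    {
        "FI12345": [
            # Close
            9.6, 10.45, 10.12, 15.45, 8.67, 12.30,
            # Open (hypothetical placeholder values)
            9.5, 10.40, 10.00, 15.40, 8.60, 12.25,
        ]
    },
    index=idx)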
github BreakingBytes / pvfree / pvfree / tests.py View on GitHub
def test_solpos_calc(self):
        data={
            'lat': 38.2,
            'lon': -122.1,
            'freq': 'T',
            'tz': -8,
            'start': '2018-01-01 07:00',
            'end': '2018-01-01 08:00'
        }
        r = self.client.get('/api/v1/pvlib/solarposition/', data)
        self.assertEqual(r.status_code, 200)
        s = pd.DataFrame(r.json()).T
        t = pd.DatetimeIndex(s.index)
        times = pd.DatetimeIndex(
            start=data['start'], end=data['end'],
            freq=data['freq'], tz='Etc/GMT{:+d}'.format(-data['tz']))
        solpos = solarposition.get_solarposition(
            times, data['lat'], data['lon'])
        assert np.allclose(
            times.values.astype(int), t.values.astype(int))
        assert np.allclose(solpos.apparent_zenith, s.apparent_zenith)
        assert np.allclose(solpos.azimuth, s.azimuth)
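Note that the range-constructor form pd.DatetimeIndex(start=..., end=..., freq=..., tz=...) used above was deprecated in pandas 0.23 and removed in later releases; on current pandas the same index is built with pd.date_range. A sketch of the replacement for that call:

import pandas as pd

data = {'start': '2018-01-01 07:00', 'end': '2018-01-01 08:00',
        'freq': 'T', 'tz': -8}

# 'T' is the minutely alias; newer pandas prefers 'min'
times = pd.date_range(start=data['start'], end=data['end'],
                      freq=data['freq'],
                      tz='Etc/GMT{:+d}'.format(-data['tz']))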
github quantrocket-llc / moonshot / tests / test_cache.py View on GitHub
def mock_get_prices(*args, **kwargs):

            dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
            fields = ["Close","Volume"]
            idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])

            prices = pd.DataFrame(
                {
                    "FI12345": [
                        # Close
                        9,
                        11,
                        10.50,
                        9.99,
                        # Volume
                        5000,
                        16000,
                        8800,
                        9900
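Once such a frame is built, the Field level of the MultiIndex makes per-field access straightforward. A hypothetical usage, assuming the truncated frame above has been completed:

# select all Close rows; the result is indexed by Date alone
closes = prices.loc["Close"]

# daily percent change per security
returns = closes.pct_change()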
github nilmtk / nilmtk / nilmtk / dataset_converters / dataport / download_dataport.py View on GitHub
def _dataport_dataframe_to_hdf(dataport_dataframe,
                               store,
                               nilmtk_building_id,
                               dataport_building_id,
                               timestamp_name,
                               metadata_dir,
                               user_selected_table):
    local_dataframe = dataport_dataframe.copy()

    # remove timezone information to avoid append errors
    local_dataframe[timestamp_name] = pd.DatetimeIndex([i.replace(tzinfo=None)
                                                       for i in local_dataframe[timestamp_name]])
    # set timestamp as frame index
    local_dataframe = local_dataframe.set_index(timestamp_name)

    # set timezone
    local_dataframe = local_dataframe.tz_localize('US/Central')
    # remove timestamp column from dataframe
    feeds_dataframe = local_dataframe.drop('dataid', axis=1)
    # Column names for dataframe
    column_names = [('power', 'active')]
    # convert from kW to W for realpower data
    if(user_selected_table.find('_realpower_') > 0):
        feeds_dataframe = feeds_dataframe.mul(1000)
    # building metadata
    building_metadata = {}
    building_metadata['instance'] = nilmtk_building_id
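The timezone handling above (strip tzinfo so the timestamps are naive, set the index, then re-localize) can be sketched in isolation; the column names and values here are illustrative:

import pandas as pd

df = pd.DataFrame({
    'localminute': pd.to_datetime(['2018-05-01 00:00-05:00',
                                   '2018-05-01 00:01-05:00']),
    'use': [0.5, 0.6],
})

# drop tz info so the values are naive wall-clock times
df['localminute'] = pd.DatetimeIndex(
    [t.replace(tzinfo=None) for t in df['localminute']])
df = df.set_index('localminute')

# re-attach the known timezone to the now-naive index
df = df.tz_localize('US/Central')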
github oemof / oemof-tabular / src / oemof / tabular / datapackage / reading.py View on GitHub
temporal = pd.DataFrame.from_dict(
                package.get_resource("temporal").read(keyed=True)
            ).set_index("timeindex").astype(float)
            # for correct freq setting of timeindex
            temporal.index = pd.DatetimeIndex(
                temporal.index.values, freq=temporal.index.inferred_freq,
                name="timeindex")
            timeindex = temporal.index

        # if no temporal provided as resource, take the first timeindex
        # from dict
        else:
            # if lst is not empty
            if lst:
                idx = pd.DatetimeIndex(lst[0])
                timeindex = pd.DatetimeIndex(idx.values,
                                             freq=idx.inferred_freq,
                                             name="timeindex")
                temporal = None
            # if for any reason the lst of datetimeindices is empty
            # (i.e. no sequences have been provided), set the timeindex to
            # one time step of today (same as in the EnergySystem __init__
            # if no timeindex is passed)
            else:
                timeindex = pd.date_range(start=pd.to_datetime("today"),
                                          periods=1, freq="H")

        es = (cls(timeindex=timeindex, temporal=temporal)
              if lst
              else cls())

        es.add(*chain(data["components"].values(),
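The rebuild with inferred_freq above is needed because a DatetimeIndex constructed from raw values has freq=None; passing inferred_freq back into the constructor restores an explicit frequency. A minimal sketch:

import pandas as pd

raw = ["2020-01-01 00:00", "2020-01-01 01:00", "2020-01-01 02:00"]
idx = pd.DatetimeIndex(raw)
print(idx.freq)   # None: freq is not inferred automatically

idx = pd.DatetimeIndex(idx.values, freq=idx.inferred_freq, name="timeindex")
print(idx.freq)   # <Hour> once the inferred frequency is applied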
github man-group / mdf / mdf / lab / __init__.py View on GitHub
cur_ctx = _get_current_context()
        root_ctx = cur_ctx.get_parent() or cur_ctx
        end_date, nodes = args[0], args[1:]
        end_date = _parse_datetime(end_date, self.shell.user_global_ns, self.shell.user_ns)
        nodes = map(lambda x: eval(x, self.shell.user_global_ns, self.shell.user_ns), nodes)

        df_ctx = root_ctx
        if len(nodes) > 0 and isinstance(nodes[-1], (dict, list, tuple)):
            shift_sets = _get_shift_sets(args[-1], nodes.pop())
            assert len(shift_sets) <= 1, "Only one shift set allowed for %mdf_evalto"
            if shift_sets:
                unused, shift_set = shift_sets[0]
                df_ctx = df_ctx.shift(shift_set=shift_set)

        df_builder = DataFrameBuilder(nodes, filter=True)
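        # NOTE: this range form of pd.DatetimeIndex(start=..., end=..., freq=...)
        # was removed in modern pandas; pd.date_range is the current equivalent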
        date_range = pd.DatetimeIndex(start=cur_ctx.get_date(), end=end_date, freq=self.__timestep)
        for dt in date_range:
            root_ctx.set_date(dt)
            df_builder(dt, df_ctx)
        return df_builder.get_dataframe(df_ctx)
github dcs4cop / xcube / xcube / webapi / controllers / timeseries.py View on GitHub
raise ValueError('max_valids must be either None, -1 or positive')

    var_values_map = dict()
    for key, var_name in key_to_var_names.items():
        values = time_series_ds[var_name].values
        if np.issubdtype(values.dtype, np.floating):
            num_type = float
        elif np.issubdtype(values.dtype, np.integer):
            num_type = int
        elif np.issubdtype(values.dtype, np.dtype(bool)):
            num_type = bool
        else:
            raise ValueError(f'cannot convert {values.dtype} into JSON-convertible value')
        var_values_map[key] = [(num_type(v) if f else None) for f, v in zip(np.isfinite(values), values)]

    time_values = [t.isoformat() + 'Z' for t in pd.DatetimeIndex(time_series_ds.time.values).round('s')]

    max_number_of_observations = time_series_ds.attrs.get('max_number_of_observations', 1)
    num_times = len(time_values)
    time_series = []

    max_valids_is_positive = max_valids is not None and max_valids > 0
    if max_valids_is_positive:
        time_indexes = range(num_times - 1, -1, -1)
    else:
        time_indexes = range(num_times)

    for time_index in time_indexes:

        if len(time_series) == max_valids:
            break
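The time-formatting line above is a useful pattern on its own: wrapping raw numpy datetime64 values in a DatetimeIndex gives vectorized rounding plus Timestamp iteration. A small sketch with made-up values:

import numpy as np
import pandas as pd

time_values = np.array(['2021-06-01T12:00:00.499',
                        '2021-06-01T13:00:00.501'], dtype='datetime64[ns]')

# round to whole seconds, then emit ISO-8601 strings with a UTC suffix
iso = [t.isoformat() + 'Z'
       for t in pd.DatetimeIndex(time_values).round('s')]
# ['2021-06-01T12:00:00Z', '2021-06-01T13:00:01Z']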
github robintw / Py6S / Py6S / SixSHelpers / aeronet.py View on GitHub
"""
        try:
            import pandas
        except ImportError:
            raise ImportError("Importing AERONET data requires the pandas module. Please see http://pandas.pydata.org/ for installation instructions.")

        # Load in the data from the file
        try:
            df = pandas.read_csv(filename, skiprows=3, na_values=["N/A"])
        except:
            raise ParameterError("AERONET file", "Error reading AERONET file - does it exist and contain data?")

        # Parse the dates/times properly and set them up as the index
        df['Date(dd-mm-yyyy)'] = df['Date(dd-mm-yyyy)'].apply(cls._to_iso_date)
        df['timestamp'] = df.apply(lambda s: pandas.to_datetime(s['Date(dd-mm-yyyy)'] + " " + s['Time(hh:mm:ss)']), axis=1)
        df.index = pandas.DatetimeIndex(df.timestamp)

        given_time = dateutil.parser.parse(time, dayfirst=True)

        df['timediffs'] = np.abs(df.timestamp - given_time).astype('timedelta64[ns]')

        # Get the AOT data at the closest time that has AOT
        # (may be closer to the given_time than the closest
        # time that has full aerosol model information)
        aot = cls._get_aot(df)
        # print "AOT = %f" % aot

        refr_ind, refi_ind, wvs, radii_ind, radii = cls._get_model_columns(df)

        # Get the indices we're interested in from the main df
        inds = refr_ind + refi_ind + radii_ind + [len(df.columns) - 1]
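The indexing idiom used above, pandas.DatetimeIndex(df.timestamp), assigns a datetime index while keeping the timestamp column available as data; a minimal sketch of that choice versus set_index, with illustrative values:

import pandas as pd

df = pd.DataFrame({'timestamp': ['2020-01-01 10:00', '2020-01-01 11:00'],
                   'aot': [0.1, 0.2]})
df['timestamp'] = pd.to_datetime(df['timestamp'])

# keeps 'timestamp' as a regular column as well as the index
df.index = pd.DatetimeIndex(df['timestamp'])

# alternative: move the column into the index entirely
# df = df.set_index('timestamp')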
github hugadams / scikit-spectra / pyuvvis / core / timeindex.py View on GitHub
    @datetimeindex.setter
    def datetimeindex(self, dti):
        
        if not isinstance(dti, DatetimeIndex):
            try:
                dti = DatetimeIndex(dti)
            except Exception:
                raise IndexError('Could not store DatetimeIndex; wrong type %s' \
                                 % type(dti))
            else:
                if len(self) != len(dti):
                    raise IndexError("Length mismatch between passed"
                         "datetimeindex %s and object %s" % (len(dti), len(self)))
                
        self._stored_dti = dti
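The coerce-or-raise pattern in this setter is reusable wherever an attribute must be a DatetimeIndex. A standalone sketch of the same idea, with hypothetical names, that also length-checks inputs that were already a DatetimeIndex:

import pandas as pd

def coerce_datetimeindex(values, expected_length):
    # coerce anything index-like into a DatetimeIndex
    if not isinstance(values, pd.DatetimeIndex):
        try:
            values = pd.DatetimeIndex(values)
        except Exception:
            raise IndexError('Could not coerce to DatetimeIndex; '
                             'wrong type %s' % type(values))
    # unlike the setter above, check length in both branches
    if len(values) != expected_length:
        raise IndexError('Length mismatch: index has %s entries, '
                         'object has %s' % (len(values), expected_length))
    return values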