How to use the pendulum.parse function in pendulum

To help you get started, we’ve selected a few pendulum.parse examples based on popular ways it is used in public projects.
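
As a quick orientation before the examples, here is a minimal sketch of what pendulum.parse does, assuming pendulum 2.x: it takes a date/time string and returns a timezone-aware DateTime (UTC unless told otherwise). The input strings are illustrative.

import pendulum

# A full datetime string parses to a timezone-aware DateTime (UTC by default).
dt = pendulum.parse("2017-06-18 12:00:00")
print(dt.isoformat())  # 2017-06-18T12:00:00+00:00

# A date-only string parses to midnight of that day.
print(pendulum.parse("2019-03-18"))  # 2019-03-18T00:00:00+00:00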


From Delgan/loguru, tests/test_rotation.py (view on GitHub):
def test_time_rotation(monkeypatch_now, tmpdir, when, hours):
    now = pendulum.parse("2017-06-18 12:00:00")  # Sunday

    monkeypatch_now(lambda *a, **k: now)

    file_1 = tmpdir.join("test.log")
    file_2 = tmpdir.join("test.log.1")
    file_3 = tmpdir.join("test.log.2")
    file_4 = tmpdir.join("test.log.3")

    logger.start(file_1.realpath(), format='{message}', rotation=when)

    m1, m2, m3, m4, m5 = 'a', 'b', 'c', 'd', 'e'

    for h, m in zip(hours, [m1, m2, m3, m4, m5]):
        now = now.add(hours=h)
        logger.debug(m)
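
The test above pins "now" to a parsed timestamp so rotation behaves deterministically, then advances it hour by hour. A minimal standalone sketch of that idiom, assuming pendulum 2.x:

import pendulum

now = pendulum.parse("2017-06-18 12:00:00")  # a fixed, known instant (a Sunday)
# DateTime objects are immutable: add() returns a new instance.
later = now.add(hours=3)
print(later)        # 2017-06-18T15:00:00+00:00
print(later > now)  # True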
From CenterForOpenScience/SHARE, tests/share/test_harvest.py (view on GitHub):
def test_latest_date_null(self):
        source_config = factories.SourceConfigFactory(
            full_harvest=True,
            earliest_date=pendulum.parse('2017-01-01').date()
        )
        assert len(HarvestScheduler(source_config).all(cutoff=pendulum.parse('2018-01-01').date())) == 365
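
The scheduler here works with plain dates, so the parsed DateTime is immediately narrowed with .date(). A minimal sketch of that conversion, assuming pendulum 2.x (the 365 in the assertion above is one scheduled harvest per day of 2017):

import datetime
import pendulum

cutoff = pendulum.parse("2018-01-01").date()
# date() drops the time-of-day component; the result is a datetime.date subclass.
print(cutoff)                             # 2018-01-01
print(isinstance(cutoff, datetime.date))  # True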
From CenterForOpenScience/SHARE, share/util/graph.py (view on GitHub):
def _merge_node_attrs(self, from_node, into_node):
        into_attrs = into_node.attrs()
        for k, new_val in from_node.attrs().items():
            if k in into_attrs:
                old_val = into_attrs[k]
                if new_val == old_val:
                    continue

                field = resolve_field(into_node.model, k)
                if isinstance(field, DateTimeField):
                    new_val = max(pendulum.parse(new_val), pendulum.parse(old_val)).isoformat()
                else:
                    new_val = self._merge_value(new_val, old_val)
            into_node[k] = new_val
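
Since parse returns comparable DateTime objects, the merge above can keep the later of two ISO strings with max() and serialize the winner back. A minimal sketch of that round trip (the timestamps are illustrative):

import pendulum

old_val = "2017-01-01T00:00:00+00:00"
new_val = "2018-06-15T09:30:00+00:00"
# Parse both sides, keep the later instant, then serialize back to ISO 8601.
merged = max(pendulum.parse(new_val), pendulum.parse(old_val)).isoformat()
print(merged)  # 2018-06-15T09:30:00+00:00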
From 510908220/heartbeats, src/app/management/commands/check.py (view on GitHub):
def process_at_service(self, service):
        """
        当 当前时间 > at时,看[at, at + grace]之间是否有上报的数据
        """
        latest_ping = self.get_last_ping(service)
        if not latest_ping:
            return

        at = pendulum.parse(service.value, tz=settings.TIME_ZONE).in_timezone('UTC')
        last_created = pendulum.instance(latest_ping.created)
        now = pendulum.now(tz='UTC')

        if now < at.add(minutes=int(service.grace)):
            return
        if last_created < at:
            self.notify(service, now)
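
The check above interprets a configured clock time in the server's timezone, then normalizes it to UTC before the comparison. A minimal sketch of that normalization, with an illustrative zone and time string:

import pendulum

# Interpret the string in a local timezone, then convert to UTC for comparisons.
at = pendulum.parse("2017-06-18 14:30:00", tz="Asia/Shanghai").in_timezone("UTC")
print(at)  # 2017-06-18T06:30:00+00:00

now = pendulum.now(tz="UTC")
print(now > at)  # True whenever this runs after that instant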
From PrefectHQ/prefect, examples/github_release_cycle.py (view on GitHub):
@task(trigger=any_failed)
def prepare_exception(exc):
    return repr(exc)


issue_task = OpenGitHubIssue(
    name="Open Release Issue",
    repo="PrefectHQ/cloud",
    title="Release Cycle is Broken",
    labels=["release", "bug"],
)


biweekly_schedule = IntervalSchedule(
    start_date=pendulum.parse("2019-03-18"), interval=datetime.timedelta(days=14)
)


with Flow("Biweekly Cloud Release", schedule=biweekly_schedule) as flow:
    exc = prepare_exception(pr_task)  # will only run if pr_task fails in some way
    issue = issue_task(body=exc)


flow.set_reference_tasks([pr_task])
flow.run()
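
A date-only string gives the schedule an unambiguous starting instant: parse turns "2019-03-18" into midnight UTC, and the resulting DateTime supports arithmetic with stdlib timedeltas. A minimal sketch of that pairing:

import datetime
import pendulum

start_date = pendulum.parse("2019-03-18")  # midnight UTC on that date
interval = datetime.timedelta(days=14)
# DateTime is a datetime subclass, so stdlib timedelta arithmetic works.
print(start_date + interval)  # 2019-04-01T00:00:00+00:00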
From CenterForOpenScience/SHARE, share/ingest/change_builder.py (view on GitHub):
len(new_model.__mro__) >= len(old_model.__mro__)

                    # Special case to allow creators to be downgraded to contributors
                    # This allows OSF users to mark project contributors as bibliographic or non-bibliographic
                    # and have that be reflected in SHARE
                    or issubclass(new_model, apps.get_model('share', 'contributor'))
            ):
                attrs_diff['@type'] = new_model._meta.label_lower

        for k, v in attrs.items():
            if k in ignore_attrs:
                logger.debug('Ignoring potentially conflicting change to "%s"', k)
                continue
            old_value = getattr(self.instance, k)
            if isinstance(old_value, datetime.datetime):
                v = pendulum.parse(v)
            if v != old_value:
                attrs_diff[k] = v.isoformat() if isinstance(v, datetime.datetime) else v

        # TODO Add relationships in for non-subjects. Somehow got omitted first time around
        if is_subject:
            for k, v in relations.items():
                old_value = getattr(self.instance, k)
                if old_value != self._get_match(v):
                    relations_diff[k] = v

        diff = {
            **attrs_diff,
            **self._relations_to_jsonld(relations_diff),
        }
        # If there's nothing to update, return None instead of an empty diff
        if not diff:
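
The diff logic normalizes incoming strings with pendulum.parse so they can be compared against datetimes already stored on the model. A minimal sketch, assuming the stored value is timezone-aware (comparing aware and naive datetimes raises TypeError):

import datetime
import pendulum

stored = datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc)
incoming = "2017-01-01T00:00:00+00:00"
# A parsed DateTime is a datetime subclass, so it compares against stdlib values.
print(pendulum.parse(incoming) == stored)  # True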
From CenterForOpenScience/SHARE, share/management/commands/migratescrapi.py (view on GitHub):
FROM webview_document
                                WHERE source = '{source}'
                            """.format(source=source)
                        )

                        with transaction.atomic():
                            record_count = 0
                            records = cursor.fetchmany(size=cursor.itersize)

                            while records:
                                bulk = []
                                for (doc_id, raw) in records:
                                    if raw is None or raw == 'null' or raw['timestamps'] is None or raw['timestamps']['harvestFinished'] is None:
                                        print('{} -> {}: {} : raw is null'.format(source, target, doc_id))
                                        continue
                                    harvest_finished = pendulum.parse(raw['timestamps']['harvestFinished'])
                                    data = raw['doc'].encode()
                                    bulk.append(RawData(
                                        source=config.user,
                                        app_label=config.label,
                                        provider_doc_id=doc_id,
                                        sha256=sha256(data).hexdigest(),
                                        data=data,
                                        date_seen=harvest_finished.datetime,
                                        date_harvested=harvest_finished.datetime,
                                    ))
                                RawData.objects.bulk_create(bulk)
                                record_count += len(records)
                                print('{} -> {}: {}'.format(source, target, record_count))
                                records = cursor.fetchmany(size=cursor.itersize)
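
The migration guards against null timestamps before parsing, since pendulum.parse raises on input it cannot understand. A minimal sketch of defensive parsing; the ParserError import path shown assumes pendulum 2.x:

import pendulum
from pendulum.parsing.exceptions import ParserError

for raw in ["2016-11-30T17:00:00.000Z", None, "not-a-date"]:
    if raw is None:
        continue  # skip missing timestamps, as the migration does
    try:
        print(pendulum.parse(raw))
    except ParserError:
        print("unparseable timestamp:", raw)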
From PrefectHQ/prefect, src/prefect/client/client.py (view on GitHub):
# load all task runs except dynamic task runs
                    with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): {
                        "id": True,
                        "task": {"id": True, "slug": True},
                        "version": True,
                        "serialized_state": True,
                    },
                }
            }
        }
        result = self.graphql(query).data.flow_run_by_pk  # type: ignore
        if result is None:
            raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id))

        # convert scheduled_start_time from string to datetime
        result.scheduled_start_time = pendulum.parse(result.scheduled_start_time)

        # create "state" attribute from serialized_state
        result.state = prefect.engine.state.State.deserialize(
            result.pop("serialized_state")
        )

        # reformat task_runs
        task_runs = []
        for tr in result.task_runs:
            tr.state = prefect.engine.state.State.deserialize(
                tr.pop("serialized_state")
            )
            task_info = tr.pop("task")
            tr.task_id = task_info["id"]
            tr.task_slug = task_info["slug"]
            task_runs.append(TaskRunInfoResult(**tr))
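
The client replaces the scheduled_start_time string from the API payload with a parsed DateTime before handing the result to callers. A minimal sketch of that in-place conversion (the payload shape here is illustrative, not Prefect's actual schema):

import pendulum

result = {"scheduled_start_time": "2019-03-18T04:30:00.123456+00:00"}
# Swap the raw string for a DateTime so downstream code gets a real datetime.
result["scheduled_start_time"] = pendulum.parse(result["scheduled_start_time"])
print(result["scheduled_start_time"].minute)  # 30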
From apache/airflow, airflow/www_rbac/views.py (view on GitHub):
def _mark_task_instance_state(self, dag_id, task_id, origin, execution_date,
                                  confirmed, upstream, downstream,
                                  future, past, state):
        dag = dagbag.get_dag(dag_id)
        task = dag.get_task(task_id)
        task.dag = dag

        execution_date = pendulum.parse(execution_date)

        if not dag:
            flash("Cannot find DAG: {}".format(dag_id))
            return redirect(origin)

        if not task:
            flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id))
            return redirect(origin)

        from airflow.api.common.experimental.mark_tasks import set_state

        if confirmed:
            altered = set_state(task=task, execution_date=execution_date,
                                upstream=upstream, downstream=downstream,
                                future=future, past=past, state=state,
                                commit=True)
From CenterForOpenScience/SHARE, bots/elasticsearch/bot.py (view on GitHub):
self.setup()
        else:
            logger.debug('Skipping ES setup')

        logger.info('Loading up indexed models')
        for model_name in self.INDEX_MODELS:
            if self.es_models and model_name.lower() not in self.es_models:
                continue

            model = apps.get_model('share', model_name)

            if self.es_filter:
                logger.info('Looking for %ss that match %s', model, self.es_filter)
                qs = model.objects.filter(**self.es_filter).values_list('id', flat=True)
            else:
                most_recent_result = pendulum.parse(self.get_most_recently_modified())
                logger.info('Looking for %ss that have been modified after %s', model, most_recent_result)
                q = Q(date_modified__gt=most_recent_result)
                if hasattr(model, 'subjects') and hasattr(model, 'subject_relations'):
                    q = q | Q(subjects__date_modified__gt=most_recent_result) | Q(subject_relations__date_modified__gt=most_recent_result)
                qs = model.objects.filter(q).values_list('id', flat=True)

            for batch in chunk(qs.iterator(), chunk_size):
                if batch:
                    if not self.to_daemon:
                        tasks.index_model.apply_async((model.__name__, batch,), {'es_url': self.es_url, 'es_index': self.es_index})
                    else:
                        try:
                            SearchIndexer(celery.current_app).index(model.__name__, *batch, index=self.es_index if self.es_index != settings.ELASTICSEARCH['INDEX'] else None)
                        except ValueError:
                            logger.warning('Not sending model type %r to the SearchIndexer', model)