How to use the dagster.check.opt_inst_param function in dagster

To help you get started, we've selected a few dagster examples that illustrate popular ways check.opt_inst_param is used in public projects.
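In short, check.opt_inst_param(obj, param_name, ttype, default=None) is the optional variant of check.inst_param: it returns the value unchanged when it is an instance of the expected type (or tuple of types), lets None pass through or replaces it with the supplied default, and raises ParameterCheckError otherwise. Here is a minimal sketch of that behavior, assuming only the check module that dagster re-exports:

from dagster import check

class Foo(object):
    pass

foo = Foo()

# An instance of the expected type is returned unchanged.
assert check.opt_inst_param(foo, 'foo', Foo) is foo

# None is permitted (the 'opt' part) and passes straight through...
assert check.opt_inst_param(None, 'foo', Foo) is None

# ...unless a default is supplied, in which case the default is returned instead.
default_foo = Foo()
assert check.opt_inst_param(None, 'foo', Foo, default_foo) is default_foo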


github dagster-io / dagster / python_modules / dagster / dagster_tests / check_tests / test_check.py (view on GitHub)
def test_opt_inst_param():  # test name assumed; the listing truncates the snippet's opening lines
    class Foo(object):
        pass

    class Bar(object):
        pass

    class Baaz(object):
        pass

    obj = Foo()

    assert check.opt_inst_param(obj, 'obj', Foo) == obj
    assert check.opt_inst_param(None, 'obj', Foo) is None
    assert check.opt_inst_param(None, 'obj', Bar) is None

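    # Passing the class object itself, rather than an instance, fails the check: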
    with pytest.raises(ParameterCheckError, match='not a Bar'):
        check.opt_inst_param(Bar, 'obj', Bar)

    with pytest.raises(ParameterCheckError, match='not a Bar'):
        check.opt_inst_param(Foo(), 'obj', Bar)

    # check defaults

    default_obj = Foo()

    assert check.opt_inst_param(None, 'obj', Foo, default_obj) is default_obj

    assert check.opt_inst_param(None, 'obj', (Foo, Bar)) is None

    with pytest.raises(ParameterCheckError, match=r"not one of \['Bar', 'Foo'\]"):
        check.inst_param(Baaz(), 'obj', (Foo, Bar))

github dagster-io / dagster / python_modules / dagster / dagster / core / execution / api.py (view on GitHub)
def create_execution_plan(pipeline, environment_dict=None, run_config=None):
    check.inst_param(pipeline, 'pipeline', PipelineDefinition)
    environment_dict = check.opt_dict_param(environment_dict, 'environment_dict', key_type=str)
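    # A default is supplied here: a fresh RunConfig() is used when run_config is None.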
    run_config = check.opt_inst_param(run_config, 'run_config', IRunConfig, RunConfig())

    environment_config = EnvironmentConfig.build(pipeline, environment_dict, run_config)

    return ExecutionPlan.build(pipeline, environment_config, run_config)

github dagster-io / dagster / python_modules / dagster / dagster / core / execution / api.py (view on GitHub)
def execute_pipeline(
    pipeline, environment_dict=None, run_config=None, instance=None, raise_on_error=True
):  # signature reconstructed from the parameters used in the body below
    '''...

    raise_on_error (bool): Defaults to ``True``, since this is the most useful behavior in test.

    Returns:
      :py:class:`PipelineExecutionResult`: The result of pipeline execution.

    For the asynchronous version, see :py:func:`execute_pipeline_iterator`.

    This is the entrypoint for dagster CLI execution. For the dagster-graphql entrypoint, see
    ``dagster.core.execution.api.execute_plan()``.
    '''

    check.inst_param(pipeline, 'pipeline', PipelineDefinition)
    environment_dict = check.opt_dict_param(environment_dict, 'environment_dict')
    run_config = check_run_config_param(run_config, pipeline)

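    # Validate the optional instance first, then fall back to an ephemeral one.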
    check.opt_inst_param(instance, 'instance', DagsterInstance)
    instance = instance or DagsterInstance.ephemeral()

    execution_plan = create_execution_plan(pipeline, environment_dict, run_config)

    pipeline_run = _create_run(instance, pipeline, run_config, environment_dict)

    with scoped_pipeline_context(
        pipeline, environment_dict, pipeline_run, instance, raise_on_error=raise_on_error
    ) as pipeline_context:
        event_list = list(
            _pipeline_execution_iterator(pipeline_context, execution_plan, pipeline_run)
        )

        return PipelineExecutionResult(
            pipeline,
            run_config.run_id,

github dagster-io / dagster / python_modules / dagster / dagster / core / storage / event_log / sqlite / sqlite_event_log.py (view on GitHub)
def __init__(self, base_dir, inst_data=None):
    '''Note that idempotent initialization of the SQLite database is done on a per-run_id
    basis in the body of connect, since each run is stored in a separate database.'''
    self._base_dir = os.path.abspath(check.str_param(base_dir, 'base_dir'))
    mkdir_p(self._base_dir)

    self._watchers = {}
    self._obs = Observer()
    self._obs.start()
    self._inst_data = check.opt_inst_param(inst_data, 'inst_data', ConfigurableClassData)

github dagster-io / dagster / python_modules / dagster / dagster / core / scheduler / storage.py (view on GitHub)
def all_schedules(self, status=None):
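    # status is a purely optional filter; opt_inst_param passes None straight through.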
    status = check.opt_inst_param(status, 'status', ScheduleStatus)

    if status:
        return [s for s in self._schedules.values() if s.status == status]

    return list(self._schedules.values())

github dagster-io / dagster / python_modules / dagster-graphql / dagster_graphql / schema / runs.py (view on GitHub)
def from_dagster_event_record(graphene_info, event_record, dauphin_pipeline, execution_plan):
    # Lots of event types. Pylint thinks there are too many branches
    # pylint: disable=too-many-branches
    check.inst_param(event_record, 'event_record', EventRecord)
    check.param_invariant(event_record.is_dagster_event, 'event_record')
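    # Both dauphin_pipeline and execution_plan are optional; opt_inst_param validates them only when provided.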
    check.opt_inst_param(
        dauphin_pipeline, 'dauphin_pipeline', graphene_info.schema.type_named('Pipeline')
    )
    check.opt_inst_param(execution_plan, 'execution_plan', ExecutionPlan)

    dagster_event = event_record.dagster_event
    basic_params = construct_basic_params(graphene_info, event_record, execution_plan)
    if dagster_event.event_type == DagsterEventType.STEP_START:
        return graphene_info.schema.type_named('ExecutionStepStartEvent')(**basic_params)
    elif dagster_event.event_type == DagsterEventType.STEP_SKIPPED:
        return graphene_info.schema.type_named('ExecutionStepSkippedEvent')(**basic_params)
    elif dagster_event.event_type == DagsterEventType.STEP_SUCCESS:
        return graphene_info.schema.type_named('ExecutionStepSuccessEvent')(**basic_params)
    elif dagster_event.event_type == DagsterEventType.STEP_INPUT:
        input_data = dagster_event.event_specific_data
        return graphene_info.schema.type_named('ExecutionStepInputEvent')(
            input_name=input_data.input_name, type_check=input_data.type_check_data, **basic_params
        )

github dagster-io / dagster / python_modules / dagster / dagster / pandas_kernel / definitions.py (view on GitHub)
def dataframe_input(name, sources=None, depends_on=None, expectations=None, input_callback=None):
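    # depends_on may be None; when given, it must be a SolidDefinition.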
    check.opt_inst_param(depends_on, 'depends_on', SolidDefinition)

    if sources is None:
        sources = [parquet_dataframe_source(), csv_dataframe_source(), table_dataframe_source()]

    def callback(context, output):
        _dataframe_input_callback(context, output)
        if input_callback:
            input_callback(context, output)

    return InputDefinition(
        name=name,
        sources=sources,
        depends_on=depends_on,
        input_callback=callback,
        expectations=expectations
    )

github dagster-io / dagster / python_modules / dagster / dagster / core / execution / plan / objects.py (view on GitHub)
def __new__(cls, input_name, type_check_data):
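    # type_check_data is optional: it is validated inline as the namedtuple is built.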
    return super(StepInputData, cls).__new__(
        cls,
        input_name=check.str_param(input_name, 'input_name'),
        type_check_data=check.opt_inst_param(type_check_data, 'type_check_data', TypeCheckData),
    )

github dagster-io / dagster / python_modules / dagster / dagster / core / definitions.py (view on GitHub)
def __init__(
    self,
    name,
    inputs,
    transform_fn,
    outputs,
    config_field=None,
    description=None,
    metadata=None,
):
    self.name = check_valid_name(name)
    self.input_defs = check.list_param(inputs, 'inputs', InputDefinition)
    self.transform_fn = check.callable_param(transform_fn, 'transform_fn')
    self.output_defs = check.list_param(outputs, 'outputs', OutputDefinition)
    self.description = check.opt_str_param(description, 'description')
    self.config_field = check.opt_inst_param(config_field, 'config_field', Field)
    self.metadata = check.opt_dict_param(metadata, 'metadata', key_type=str)
    self._input_dict = {inp.name: inp for inp in inputs}
    self._output_dict = {output.name: output for output in outputs}

github dagster-io / dagster / python_modules / dagster-graphql / dagster_graphql / cli.py (view on GitHub)
def execute_query(handle, query, variables=None, use_sync_executor=False, instance=None):
    check.inst_param(handle, 'handle', ExecutionTargetHandle)
    check.str_param(query, 'query')
    check.opt_dict_param(variables, 'variables')
    # We allow external creation of the pipeline_run_storage to support testing contexts where we
    # need access to the underlying run storage
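    # Note: DagsterInstance.get() is evaluated eagerly as the default, even when an instance is passed in,
    # since Python evaluates all arguments before the call.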
    instance = check.opt_inst_param(instance, 'instance', DagsterInstance, DagsterInstance.get())
    check.bool_param(use_sync_executor, 'use_sync_executor')

    query = query.strip('\'" \n\t')

    execution_manager = SynchronousExecutionManager()

    context = DagsterGraphQLContext(
        handle=handle, instance=instance, execution_manager=execution_manager, version=__version__
    )

    executor = SyncExecutor() if use_sync_executor else GeventExecutor()

    result = graphql(
        request_string=query,
        schema=create_schema(),
        context=context,
        # the original snippet is truncated here; 'variables' and 'executor'
        # are assumed as the remaining keyword arguments from the surrounding code
        variables=variables,
        executor=executor,
    )