How to use the rsmtool.test_utils.do_run_evaluation function in rsmtool

To help you get started, we've selected a few rsmtool examples based on popular ways it is used in public projects.

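All of the snippets below are excerpted from rsmtool's own test suite, so they rely on shared setup that the excerpts omit. The following sketch shows the imports they appear to assume; the check helpers are assumed to live in rsmtool.test_utils alongside do_run_evaluation, and the name of the test-directory constant varies (most excerpts use test_dir, while the test_experiment_rsmeval.py excerpt uses rsmtool_test_dir for the same idea):

from glob import glob
from os.path import basename, dirname, exists, join

from rsmtool.test_utils import (check_consistency_files_exist,
                                check_csv_output,
                                check_report,
                                do_run_evaluation)

# fixtures under data/experiments are resolved relative to the test file
test_dir = dirname(__file__)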

From EducationalTestingService/rsmtool (tests/test_experiment.py):
def test_run_experiment_lr_eval_with_h2_named_sc1():

    # basic rsmeval experiment with second rater analyses
    # but the label for the second rater is sc1 and there are
    # missing values for the first score

    source = 'lr-eval-with-h2-named-sc1'
    experiment_id = 'lr_eval_with_h2_named_sc1'
    config_file = join(test_dir,
                       'data',
                       'experiments',
                       source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)

    output_dir = join('test_outputs', source, 'output')
    expected_output_dir = join(test_dir, 'data', 'experiments', source, 'output')
    html_report = join('test_outputs', source, 'report', '{}_report.html'.format(experiment_id))

    csv_files = glob(join(output_dir, '*.csv'))
    for csv_file in csv_files:
        csv_filename = basename(csv_file)
        expected_csv_file = join(expected_output_dir, csv_filename)

        if exists(expected_csv_file):
            yield check_csv_output, csv_file, expected_csv_file

    yield check_consistency_files_exist, csv_files, experiment_id
    yield check_report, html_report
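
The yield statements make this a nose-style test generator: each yielded tuple pairs a check helper with its arguments, and the test runner executes every tuple as a separate sub-test. Conceptually, the runner consumes the generator roughly like this (a minimal sketch, not code from rsmtool):

# call the first element of each yielded tuple with the rest as arguments
for check_func, *args in test_run_experiment_lr_eval_with_h2_named_sc1():
    check_func(*args)   # e.g. check_csv_output(csv_file, expected_csv_file)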

From EducationalTestingService/rsmtool (tests/test_experiment.py):
def test_run_experiment_lr_eval_same_system_human_score():

    # rsmeval experiment with the same value supplied
    # for both the human score and the system score

    source = 'lr-eval-same-system-human-score'
    experiment_id = 'lr_eval_same_system_human_score'
    config_file = join(test_dir,
                       'data',
                       'experiments',
                       source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)

From EducationalTestingService/rsmtool (tests/test_experiment.py):
def test_run_experiment_eval_lr_with_missing_h2_column():

    # rsmeval experiment with `second_human_score_column`
    # set to a column that does not exist in the given
    # predictions file
    source = 'lr-eval-with-missing-h2-column'
    experiment_id = 'lr_eval_with_missing_h2_column'
    config_file = join(test_dir,
                       'data',
                       'experiments',
                       source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)

From EducationalTestingService/rsmtool (tests/test_experiment_rsmeval.py):
def test_run_experiment_lr_eval_with_repeated_ids():

    # rsmeval experiment with non-unique ids
    source = 'lr-eval-with-repeated-ids'
    experiment_id = 'lr_eval_with_repeated_ids'
    config_file = join(rsmtool_test_dir,
                       'data',
                       'experiments',
                       source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)
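
Note that the three excerpts above end right after the do_run_evaluation() call, with no yielded checks: each one feeds the function an invalid setup (identical human and system scores, a missing second-rater column, repeated ids), so in the full test files these functions are presumably decorated to expect an exception rather than to compare outputs; any such decorators are simply not part of the excerpts.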

From EducationalTestingService/rsmtool (tests/test_experiment.py):
def test_run_experiment_lr_eval_with_custom_sections_and_order():

    # rsmeval experiment with custom sections and custom section
    # ordering

    source = 'lr-eval-with-custom-sections-and-order'
    experiment_id = 'lr_eval_with_custom_sections_and_order'
    config_file = join(test_dir,
                       'data',
                       'experiments',
                       source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)

    output_dir = join('test_outputs', source, 'output')
    expected_output_dir = join(test_dir, 'data', 'experiments', source, 'output')
    html_report = join('test_outputs', source, 'report', '{}_report.html'.format(experiment_id))

    csv_files = glob(join(output_dir, '*.csv'))
    for csv_file in csv_files:
        csv_filename = basename(csv_file)
        expected_csv_file = join(expected_output_dir, csv_filename)

        if exists(expected_csv_file):
            yield check_csv_output, csv_file, expected_csv_file

    yield check_report, html_report

From EducationalTestingService/rsmtool (tests/test_experiment.py):
def test_run_experiment_lr_eval_with_missing_scores():

    # basic rsmeval experiment with missing human scores

    source = 'lr-eval-with-missing-scores'
    experiment_id = 'lr_eval_with_missing_scores'
    config_file = join(test_dir,
                       'data',
                       'experiments',
                       source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)

    output_dir = join('test_outputs', source, 'output')
    expected_output_dir = join(test_dir, 'data', 'experiments', source, 'output')
    html_report = join('test_outputs', source, 'report', '{}_report.html'.format(experiment_id))

    csv_files = glob(join(output_dir, '*.csv'))
    for csv_file in csv_files:
        csv_filename = basename(csv_file)
        expected_csv_file = join(expected_output_dir, csv_filename)

        if exists(expected_csv_file):
            yield check_csv_output, csv_file, expected_csv_file

    yield check_report, html_report

From EducationalTestingService/rsmtool (tests/test_experiment.py):
def test_run_experiment_lr_eval_with_custom_order():

    # rsmeval experiment with custom section ordering

    source = 'lr-eval-with-custom-order'
    experiment_id = 'lr_eval_with_custom_order'
    config_file = join(test_dir,
                       'data',
                       'experiments',
                       source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)

    output_dir = join('test_outputs', source, 'output')
    expected_output_dir = join(test_dir, 'data', 'experiments', source, 'output')
    html_report = join('test_outputs', source, 'report', '{}_report.html'.format(experiment_id))

    csv_files = glob(join(output_dir, '*.csv'))
    for csv_file in csv_files:
        csv_filename = basename(csv_file)
        expected_csv_file = join(expected_output_dir, csv_filename)

        if exists(expected_csv_file):
            yield check_csv_output, csv_file, expected_csv_file

    yield check_report, html_report

From EducationalTestingService/rsmtool (tests/test_experiment.py):
def test_run_experiment_lr_eval_with_h2():

    # basic rsmeval experiment with second rater analyses

    source = 'lr-eval-with-h2'
    experiment_id = 'lr_eval_with_h2'
    config_file = join(test_dir,
                       'data',
                       'experiments',
                       source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)

    output_dir = join('test_outputs', source, 'output')
    expected_output_dir = join(test_dir, 'data', 'experiments', source, 'output')
    html_report = join('test_outputs', source, 'report', '{}_report.html'.format(experiment_id))

    csv_files = glob(join(output_dir, '*.csv'))
    for csv_file in csv_files:
        csv_filename = basename(csv_file)
        expected_csv_file = join(expected_output_dir, csv_filename)

        if exists(expected_csv_file):
            yield check_csv_output, csv_file, expected_csv_file

    yield check_report, html_report

From EducationalTestingService/rsmtool (tests/test_experiment.py):
def test_run_experiment_lr_eval_with_scaling():

    # rsmeval evaluation experiment with scaling

    source = 'lr-eval-with-scaling'
    experiment_id = 'lr_evaluation_with_scaling'
    config_file = join(test_dir,
                       'data',
                       'experiments',
                       source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)

    output_dir = join('test_outputs', source, 'output')
    expected_output_dir = join(test_dir, 'data', 'experiments', source, 'output')
    html_report = join('test_outputs', source, 'report', '{}_report.html'.format(experiment_id))

    csv_files = glob(join(output_dir, '*.csv'))
    for csv_file in csv_files:
        csv_filename = basename(csv_file)
        expected_csv_file = join(expected_output_dir, csv_filename)

        if exists(expected_csv_file):
            yield check_csv_output, csv_file, expected_csv_file

    yield check_report, html_report

From EducationalTestingService/rsmtool (tests/test_experiment.py):
def test_run_experiment_lr_eval_with_custom_sections():

    # rsmeval experiment with custom sections

    source = 'lr-eval-with-custom-sections'
    experiment_id = 'lr_eval_with_custom_sections'
    config_file = join(test_dir,
                       'data',
                       'experiments',
                       source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)

    output_dir = join('test_outputs', source, 'output')
    expected_output_dir = join(test_dir, 'data', 'experiments', source, 'output')
    html_report = join('test_outputs', source, 'report', '{}_report.html'.format(experiment_id))

    csv_files = glob(join(output_dir, '*.csv'))
    for csv_file in csv_files:
        csv_filename = basename(csv_file)
        expected_csv_file = join(expected_output_dir, csv_filename)

        if exists(expected_csv_file):
            yield check_csv_output, csv_file, expected_csv_file

    yield check_report, html_report
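
Stripped of the test harness, every example follows the same three-step pattern: build the path to the experiment's JSON configuration, call do_run_evaluation with the experiment directory name, the experiment id, and that path, then inspect the results under test_outputs/<source>/output and test_outputs/<source>/report. A minimal sketch with hypothetical names:

from os.path import join

from rsmtool.test_utils import do_run_evaluation

source = 'my-eval-experiment'         # hypothetical experiment directory
experiment_id = 'my_eval_experiment'  # hypothetical experiment id
config_file = join('data', 'experiments', source,
                   '{}.json'.format(experiment_id))
do_run_evaluation(source, experiment_id, config_file)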