How to use rsmtool - 10 common examples

To help you get started, we’ve selected a few rsmtool examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github EducationalTestingService / rsmtool / tests / test_utils.py View on Github external
def test_run_subparser_no_output_directory(self):
        """Check that the 'run' subparser namespace has no output directory when one is not required."""
        parser = setup_rsmcmd_parser('test', uses_output_directory=False)
        config_file = join(rsmtool_test_dir, 'data', 'experiments', 'lr', 'lr.json')
        cmdline = f"run {config_file}".split()
        actual = parser.parse_args(cmdline)
        expected = argparse.Namespace(subcommand='run',
                                      config_file=config_file)
        # the attribute must be absent entirely, not merely empty
        ok_(not hasattr(actual, 'output_dir'))
        eq_(actual, expected)
github EducationalTestingService / rsmtool / tests / test_utils.py View on Github external
def test_run_subparser_with_output_directory(self):
        """Check that the 'run' subparser parses an explicit output directory argument."""
        parser = setup_rsmcmd_parser('test')
        config_file = join(rsmtool_test_dir, 'data', 'experiments', 'lr', 'lr.json')
        cmdline = f"run {config_file} /path/to/output/dir".split()
        actual = parser.parse_args(cmdline)
        expected = argparse.Namespace(subcommand='run',
                                      config_file=config_file,
                                      output_dir='/path/to/output/dir')
        eq_(actual, expected)
github EducationalTestingService / rsmtool / tests / test_utils.py View on Github external
def test_generate_subparser_with_subgroups_and_interactive_short_flags_together(self):
        """Check the 'generate' subparser with the short subgroups and interactive flags combined as '-ig'."""
        parser = setup_rsmcmd_parser('test', uses_subgroups=True)
        # the two single-letter flags are fused into one token here
        actual = parser.parse_args('generate -ig'.split())
        expected = argparse.Namespace(subcommand='generate',
                                      interactive=True,
                                      subgroups=True,
                                      quiet=False)
        eq_(actual, expected)
github EducationalTestingService / rsmtool / tests / test_utils.py View on Github external
def test_generate_subparser_with_only_interactive_short_flag(self):
        """Check the 'generate' subparser when only the short interactive flag '-i' is given."""
        parser = setup_rsmcmd_parser('test')
        actual = parser.parse_args('generate -i'.split())
        expected = argparse.Namespace(interactive=True,
                                      quiet=False,
                                      subcommand='generate')
        eq_(actual, expected)
github EducationalTestingService / rsmtool / tests / test_utils.py View on Github external
def test_generate_subparser_with_subgroups_and_interactive_short_flags(self):
        """Check the 'generate' subparser with the short subgroups and interactive flags given separately."""
        parser = setup_rsmcmd_parser('test', uses_subgroups=True)
        # same flags as the combined '-ig' case, but passed as two tokens
        actual = parser.parse_args('generate -i -g'.split())
        expected = argparse.Namespace(subcommand='generate',
                                      interactive=True,
                                      subgroups=True,
                                      quiet=False)
        eq_(actual, expected)
github EducationalTestingService / rsmtool / tests / test_utils.py View on Github external
def test_generate_subparser_help_flag(self):
        """Check that the 'generate' subparser handles '--help' and calls ``sys.exit``."""
        parser = setup_rsmcmd_parser('test')
        # patch sys.exit because argparse would otherwise exit on --help
        with patch('sys.exit') as exit_mock:
            actual = parser.parse_args('generate --help'.split())
        expected = argparse.Namespace(subcommand='generate',
                                      quiet=False,
                                      interactive=False)
        eq_(actual, expected)
        assert exit_mock.called
github EducationalTestingService / rsmtool / tests / test_experiment_rsmtool_3.py View on Github external
def test_run_experiment_duplicate_feature_names():
    """Run an rsmtool experiment whose configuration contains duplicate feature names."""
    source = 'lr-with-duplicate-feature-names'
    experiment_id = 'lr_with_duplicate_feature_names'
    config_file = join(rsmtool_test_dir, 'data', 'experiments', source,
                       '{}.json'.format(experiment_id))
    do_run_experiment(source, experiment_id, config_file)
github EducationalTestingService / rsmtool / tests / test_experiment_rsmtool_3.py View on Github external
def test_run_experiment_wrong_train_file_path():
    """Run a basic experiment whose ``train_file`` field points to a non-existing file."""
    source = 'lr-wrong-path'
    experiment_id = 'lr'
    config_file = join(rsmtool_test_dir, 'data', 'experiments', source,
                       '{}.json'.format(experiment_id))
    do_run_experiment(source, experiment_id, config_file)
github EducationalTestingService / rsmtool / tests / test_experiment.py View on Github external
def test_run_experiment_adaboost():
    """Run a basic experiment with the AdaBoostRegressor model and yield output checks."""
    source = 'adaboost'
    experiment_id = 'AdaBoost'
    config_file = join(test_dir, 'data', 'experiments', source,
                       '{}.json'.format(experiment_id))
    do_run_experiment(source, experiment_id, config_file)

    output_dir = join('test_outputs', source, 'output')
    expected_output_dir = join(test_dir, 'data', 'experiments', source, 'output')
    html_report = join('test_outputs', source, 'report',
                       '{}_report.html'.format(experiment_id))

    # compare each generated CSV against its expected counterpart, when one exists
    csv_files = glob(join(output_dir, '*.csv'))
    for csv_file in csv_files:
        expected_csv_file = join(expected_output_dir, basename(csv_file))
        if exists(expected_csv_file):
            yield check_csv_output, csv_file, expected_csv_file

    yield check_all_csv_exist, csv_files, experiment_id, 'skll'
    yield check_report, html_report
github EducationalTestingService / rsmtool / tests / test_experiment.py View on Github external
def test_run_experiment_lr_subgroups_with_h2():
    """
    Run a basic rsmtool experiment with subgroups and second-rater (h2)
    analyses, yielding nose-style generator checks on the outputs.

    NOTE(review): this function may continue beyond the visible excerpt
    (sibling tests also yield a report check) — confirm against the full file.
    """

    # basic experiment with subgroups and second
    # rater analyses

    source = 'lr-subgroups-with-h2'
    experiment_id = 'lr_subgroups_with_h2'
    config_file = join(test_dir,
                       'data',
                       'experiments',
                       source,
                       '{}.json'.format(experiment_id))
    do_run_experiment(source, experiment_id, config_file)

    output_dir = join('test_outputs', source, 'output')
    expected_output_dir = join(test_dir, 'data', 'experiments', source, 'output')
    html_report = join('test_outputs', source, 'report', '{}_report.html'.format(experiment_id))

    csv_files = glob(join(output_dir, '*.csv'))
    for csv_file in csv_files:
        csv_filename = basename(csv_file)
        expected_csv_file = join(expected_output_dir, csv_filename)

        # only compare files that have a stored expected counterpart
        if exists(expected_csv_file):
            yield check_csv_output, csv_file, expected_csv_file

    yield check_all_csv_exist, csv_files, experiment_id, 'rsmtool'
    yield check_consistency_files_exist, csv_files, experiment_id
    yield check_scaled_coefficients, source, experiment_id