How to use the openml.config module in openml

To help you get started, we have selected a few openml.config examples based on popular ways it is used in public projects.

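Before diving into the examples, here is a minimal read-only sketch of what openml.config exposes. The names used below (server, apikey, get_cache_directory) all appear in the snippets that follow; printing them only inspects the current configuration and changes nothing.

import openml

# The API endpoint that all calls are sent to (the production server by default).
print(openml.config.server)

# Whether an API key is configured; writes to the server require one.
print(bool(openml.config.apikey))

# The local directory where downloaded datasets, tasks, and runs are cached.
print(openml.config.get_cache_directory())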

github openml / openml-python / tests / test_flows / test_flow_functions.py
def test_list_flows_output_format(self):
        openml.config.server = self.production_server
        # We can only perform a smoke test here because we test on dynamic
        # data from the internet...
        flows = openml.flows.list_flows(output_format='dataframe')
        self.assertIsInstance(flows, pd.DataFrame)
        self.assertGreaterEqual(len(flows), 1500)
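The test above points openml.config.server at the production server so that list_flows sees live data. Below is a small sketch of switching between servers by reassigning the same attribute; the two URLs are the openml-python defaults at the time of writing.

import openml

PRODUCTION_SERVER = "https://www.openml.org/api/v1/xml"
TEST_SERVER = "https://test.openml.org/api/v1/xml"

# Read-only calls such as list_flows are fine against the production server.
openml.config.server = PRODUCTION_SERVER
flows = openml.flows.list_flows(output_format="dataframe")
print(len(flows))

# Switch to the test server before uploading anything experimental.
openml.config.server = TEST_SERVER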
github openml / openml-python / tests / test_datasets / test_dataset_functions.py
def ensure_absence_of_real_data():
            self.assertFalse(os.path.exists(os.path.join(
                openml.config.get_cache_directory(), "datasets", "1", "dataset.arff")))
github openml / openml-python / tests / test_datasets / test_dataset_functions.py
def _remove_pickle_files(self):
        self.lock_path = os.path.join(openml.config.get_cache_directory(), 'locks')
        for did in ['-1', '2']:
            with lockutils.external_lock(
                    name='datasets.functions.get_dataset:%s' % did,
                    lock_path=self.lock_path,
            ):
                pickle_path = os.path.join(openml.config.get_cache_directory(), 'datasets',
                                           did, 'dataset.pkl.py3')
                try:
                    os.remove(pickle_path)
                except (OSError, FileNotFoundError):
                    #  Replaced a bare except. Not sure why either of these would be acceptable.
                    pass
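Both helpers above build paths inside openml.config.get_cache_directory(), where downloaded data lives under datasets/<dataset id>/. As a follow-up, here is a hedged sketch of a standalone helper (the function name is made up for illustration) that clears one dataset's cached files using that same layout.

import os
import shutil

import openml

def clear_cached_dataset(dataset_id):
    # Illustrative helper: remove the cache directory of a single dataset, if present.
    path = os.path.join(openml.config.get_cache_directory(), "datasets", str(dataset_id))
    if os.path.isdir(path):
        shutil.rmtree(path)

clear_cached_dataset(2)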
github openml / openml-python / tests / test_extensions / test_sklearn_extension / test_sklearn_extension.py
def test_can_handle_flow(self):
        openml.config.server = self.production_server

        R_flow = openml.flows.get_flow(6794)
        assert not self.extension.can_handle_flow(R_flow)
        old_3rd_party_flow = openml.flows.get_flow(7660)
        assert self.extension.can_handle_flow(old_3rd_party_flow)

        openml.config.server = self.test_server
github openml / openml-python / develop / _downloads / 0d1d1c06933bd6d32bda534cb0aa0e53 / create_upload_tutorial.py
row_id_attribute=None,
    ignore_attribute=None,
    citation=None,
    attributes='auto',
    data=df,
    version_label='example',
)

############################################################################

xor_dataset.publish()
print(f"URL for dataset: {xor_dataset.openml_url}")


############################################################################
openml.config.stop_using_configuration_for_example()
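This tutorial excerpt, like several below, ends with openml.config.stop_using_configuration_for_example() but does not show its counterpart. The two calls come as a pair; here is a minimal sketch of the pattern, with the actual work reduced to a placeholder comment.

import openml

# Point the client at the test server so that uploads made while
# experimenting do not end up on the production server.
openml.config.start_using_configuration_for_example()

# ... create and publish example datasets, runs, or studies here ...

# Restore whatever server and API key were configured beforehand.
openml.config.stop_using_configuration_for_example()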
github openml / openml-python / develop / _downloads / 6b1e091fbd3ac8d106b6552c91cf05cc / run_setup_tutorial.py
# and run the task again
run_duplicate = openml.runs.run_model_on_task(
    model_duplicate, task, avoid_duplicate_runs=False)


###############################################################################
# 3) We will verify that the obtained results are exactly the same.
###############################################################################

# the run has stored all predictions in the data_content field
np.testing.assert_array_equal(run_original.data_content,
                              run_duplicate.data_content)

###############################################################################

openml.config.stop_using_configuration_for_example()
github openml / openml-python / master / _downloads / 911f16d4db6b665d864c4483331b062a / introduction_tutorial.py
############################################################################
# Simple Example
# ^^^^^^^^^^^^^^
# Download the OpenML task for the eeg-eye-state dataset.
task = openml.tasks.get_task(403)
data = openml.datasets.get_dataset(task.dataset_id)
clf = neighbors.KNeighborsClassifier(n_neighbors=5)
run = openml.runs.run_model_on_task(clf, task, avoid_duplicate_runs=False)
# Publish the experiment on OpenML (optional, requires an API key).
# For this tutorial, our configuration publishes to the test server
# so as not to crowd the main server with runs created by examples.
myrun = run.publish()
print("kNN on %s: http://test.openml.org/r/%d" % (data.name, myrun.run_id))

############################################################################
openml.config.stop_using_configuration_for_example()
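Publishing the run above requires an API key. Here is a hedged sketch of supplying one explicitly instead of relying on the example configuration; the key is a placeholder, and the server URL is the openml-python default for the test server at the time of writing.

import openml

# Keep example uploads off the production server.
openml.config.server = "https://test.openml.org/api/v1/xml"

# publish() authenticates with this key; replace the placeholder with your own.
openml.config.apikey = "YOUR_OPENML_API_KEY"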
github openml / openml-python / examples / 30_extended / study_tutorial.py
# we simply generate a random uuid.
alias = uuid.uuid4().hex

new_study = openml.study.create_study(
    name='Test-Study',
    description='Test study for the Python tutorial on studies',
    run_ids=run_ids,
    alias=alias,
    benchmark_suite=suite.study_id,
)
new_study.publish()
print(new_study)


############################################################################
openml.config.stop_using_configuration_for_example()
github openml / openml-python / examples / sklearn / openml_run_example.py
clf = pipeline.Pipeline(
    steps=[
        ('imputer', impute.SimpleImputer()),
        ('estimator', tree.DecisionTreeClassifier())
    ]
)
############################################################################
# Download the OpenML task for the German credit card dataset.
task = openml.tasks.get_task(97)
############################################################################
# Run the scikit-learn model on the task (requires an API key).
run = openml.runs.run_model_on_task(clf, task)
# Publish the experiment on OpenML (optional, requires an API key).
run.publish()

print('URL for run: %s/run/%d' % (openml.config.server, run.run_id))

############################################################################
openml.config.stop_using_configuration_for_example()