Commit 9164a93

Undo whitespace changes to files not touched otherwise
1 parent 4f49efd commit 9164a93

8 files changed: 57 additions & 16 deletions

tests/test_flows/test_flow.py

Lines changed: 6 additions & 1 deletion

@@ -5,7 +5,6 @@
 import copy
 import hashlib
 import re
-import os
 import time
 from packaging.version import Version
 from unittest import mock
@@ -34,6 +33,7 @@
 from openml.testing import SimpleImputer, TestBase


+
 class TestFlow(TestBase):
     _multiprocess_can_split_ = True

@@ -179,6 +179,7 @@ def test_to_xml_from_xml(self):
         # Would raise exception if they are not legal
         openml.flows.functions.assert_flows_equal(new_flow, flow)
         assert new_flow is not flow
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_publish_flow(self):
@@ -220,6 +221,7 @@ def test_publish_existing_flow(self, flow_exists_mock):
         TestBase.logger.info(
             f"collected from {__file__.split('/')[-1]}: {flow.flow_id}",
         )
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_publish_flow_with_similar_components(self):
@@ -270,6 +272,7 @@ def test_publish_flow_with_similar_components(self):
         flow3.publish()
         TestBase._mark_entity_for_removal("flow", flow3.flow_id, flow3.name)
         TestBase.logger.info(f"collected from {__file__.split('/')[-1]}: {flow3.flow_id}")
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_semi_legal_flow(self):
@@ -379,6 +382,7 @@ def get_sentinel():

         flow_id = openml.flows.flow_exists(name, version)
         assert not flow_id
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_existing_flow_exists(self):
@@ -419,6 +423,7 @@ def test_existing_flow_exists(self):
             flow.external_version,
         )
         assert downloaded_flow_id == flow.flow_id
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_sklearn_to_upload_to_flow(self):

tests/test_flows/test_flow_functions.py

Lines changed: 2 additions & 1 deletion

@@ -12,7 +12,6 @@
 from unittest import mock
 from unittest.mock import patch

-import os
 import pandas as pd
 import pytest
 import requests
@@ -309,6 +308,7 @@ def test_get_flow1(self):
         self.use_production_server()
         flow = openml.flows.get_flow(1)
         assert flow.external_version is None
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_get_flow_reinstantiate_model(self):
@@ -391,6 +391,7 @@ def test_get_flow_reinstantiate_flow_not_strict_pre_023(self):
         flow = openml.flows.get_flow(flow_id=8175, reinstantiate=True, strict_version=False)
         assert flow.flow_id is None
         assert "sklearn==0.19.1" not in flow.dependencies
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_get_flow_id(self):

tests/test_openml/test_api_calls.py

Lines changed: 1 addition & 1 deletion

@@ -7,7 +7,6 @@

 import minio
 import pytest
-import os

 import openml
 from openml.config import ConfigurationForExamples
@@ -20,6 +19,7 @@ class TestConfig(openml.testing.TestBase):
     def test_too_long_uri(self):
         with pytest.raises(openml.exceptions.OpenMLServerError, match="URI too long!"):
             openml.datasets.list_datasets(data_id=list(range(10000)))
+
     @unittest.mock.patch("time.sleep")
     @unittest.mock.patch("requests.Session")
     @pytest.mark.test_server()

tests/test_runs/test_run.py

Lines changed: 5 additions & 0 deletions

@@ -117,6 +117,7 @@ def _check_array(array, type_):
             np.testing.assert_array_equal(string_part, string_part_prime)
         else:
             assert run_prime_trace_content is None
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_to_from_filesystem_vanilla(self):
@@ -151,6 +152,7 @@ def test_to_from_filesystem_vanilla(self):
         TestBase.logger.info(
             f"collected from {__file__.split('/')[-1]}: {run_prime.run_id}",
         )
+
     @pytest.mark.sklearn()
     @pytest.mark.flaky()
     @pytest.mark.test_server()
@@ -186,6 +188,7 @@ def test_to_from_filesystem_search(self):
         TestBase.logger.info(
             f"collected from {__file__.split('/')[-1]}: {run_prime.run_id}",
         )
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_to_from_filesystem_no_model(self):
@@ -291,6 +294,7 @@ def assert_run_prediction_data(task, run, model):
         # Assert correctness
         assert_method(y_pred, saved_y_pred)
         assert_method(y_test, saved_y_test)
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_publish_with_local_loaded_flow(self):
@@ -334,6 +338,7 @@ def test_publish_with_local_loaded_flow(self):
         # make sure the flow is published as part of publishing the run.
         assert openml.flows.flow_exists(flow.name, flow.external_version)
         openml.runs.get_run(loaded_run.run_id)
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_offline_and_online_run_identical(self):

tests/test_runs/test_run_functions.py

Lines changed: 32 additions & 2 deletions

@@ -396,6 +396,7 @@ def _check_sample_evaluations(
             # Windows seems to get an eval-time of 0 sometimes.
             assert evaluation > 0
             assert evaluation < max_time_allowed
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_run_regression_on_classif_task(self):
@@ -412,6 +413,7 @@ def test_run_regression_on_classif_task(self):
                 model=clf,
                 task=task,
             )
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_check_erronous_sklearn_flow_fails(self):
@@ -624,6 +626,7 @@ def _run_and_upload_regression(
             task_type=task_type,
             sentinel=sentinel,
         )
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_run_and_upload_logistic_regression(self):
@@ -632,6 +635,7 @@ def test_run_and_upload_logistic_regression(self):
         n_missing_vals = self.TEST_SERVER_TASK_SIMPLE["n_missing_vals"]
         n_test_obs = self.TEST_SERVER_TASK_SIMPLE["n_test_obs"]
         self._run_and_upload_classification(lr, task_id, n_missing_vals, n_test_obs, "62501")
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_run_and_upload_linear_regression(self):
@@ -662,6 +666,7 @@ def test_run_and_upload_linear_regression(self):
         n_missing_vals = self.TEST_SERVER_TASK_REGRESSION["n_missing_vals"]
         n_test_obs = self.TEST_SERVER_TASK_REGRESSION["n_test_obs"]
         self._run_and_upload_regression(lr, task_id, n_missing_vals, n_test_obs, "62501")
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_run_and_upload_pipeline_dummy_pipeline(self):
@@ -675,6 +680,7 @@ def test_run_and_upload_pipeline_dummy_pipeline(self):
         n_missing_vals = self.TEST_SERVER_TASK_SIMPLE["n_missing_vals"]
         n_test_obs = self.TEST_SERVER_TASK_SIMPLE["n_test_obs"]
         self._run_and_upload_classification(pipeline1, task_id, n_missing_vals, n_test_obs, "62501")
+
     @pytest.mark.sklearn()
     @unittest.skipIf(
         Version(sklearn.__version__) < Version("0.20"),
@@ -791,6 +797,7 @@ def test_run_and_upload_knn_pipeline(self, warnings_mock):
             if _warnings[0][0] == warning_msg:
                 call_count += 1
         assert call_count == 3
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_run_and_upload_gridsearch(self):
@@ -813,6 +820,7 @@ def test_run_and_upload_gridsearch(self):
             flow_expected_rsv="62501",
         )
         assert len(run.trace.trace_iterations) == 9
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_run_and_upload_randomsearch(self):
@@ -845,6 +853,7 @@ def test_run_and_upload_randomsearch(self):
         assert len(run.trace.trace_iterations) == 5
         trace = openml.runs.get_run_trace(run.run_id)
         assert len(trace.trace_iterations) == 5
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_run_and_upload_maskedarrays(self):
@@ -872,6 +881,7 @@ def test_run_and_upload_maskedarrays(self):
         )

     ##########################################################################
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_learning_curve_task_1(self):
@@ -896,6 +906,7 @@ def test_learning_curve_task_1(self):
             flow_expected_rsv="62501",
         )
         self._check_sample_evaluations(run.sample_evaluations, num_repeats, num_folds, num_samples)
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_learning_curve_task_2(self):
@@ -932,6 +943,7 @@ def test_learning_curve_task_2(self):
             flow_expected_rsv="62501",
         )
         self._check_sample_evaluations(run.sample_evaluations, num_repeats, num_folds, num_samples)
+
     @pytest.mark.sklearn()
     @unittest.skipIf(
         Version(sklearn.__version__) < Version("0.21"),
@@ -1010,6 +1022,7 @@ def _test_local_evaluations(self, run):
             for idx in range(len(alt_scores)):
                 assert alt_scores[idx] >= 0
                 assert alt_scores[idx] <= 1
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_local_run_swapped_parameter_order_model(self):
@@ -1025,6 +1038,7 @@ def test_local_run_swapped_parameter_order_model(self):
         )

         self._test_local_evaluations(run)
+
     @pytest.mark.sklearn()
     @unittest.skipIf(
         Version(sklearn.__version__) < Version("0.20"),
@@ -1053,6 +1067,7 @@ def test_local_run_swapped_parameter_order_flow(self):
         )

         self._test_local_evaluations(run)
+
     @pytest.mark.sklearn()
     @unittest.skipIf(
         Version(sklearn.__version__) < Version("0.20"),
@@ -1090,6 +1105,7 @@ def test_online_run_metric_score(self):
         run = openml.runs.get_run(9864498)

         self._test_local_evaluations(run)
+
     @pytest.mark.sklearn()
     @unittest.skipIf(
         Version(sklearn.__version__) < Version("0.20"),
@@ -1151,6 +1167,7 @@ def test_initialize_model_from_run(self):

         assert flowS.components["Imputer"].parameters["strategy"] == '"most_frequent"'
         assert flowS.components["VarianceThreshold"].parameters["threshold"] == "0.05"
+
     @pytest.mark.sklearn()
     @unittest.skipIf(
         Version(sklearn.__version__) < Version("0.20"),
@@ -1210,6 +1227,7 @@ def test__run_exists(self):
             assert setup_exists > 0, "Server says setup of run does not exist."
             run_ids = run_exists(task.task_id, setup_exists)
             assert run_ids, (run_ids, clf)
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_run_with_illegal_flow_id(self):
@@ -1229,6 +1247,7 @@ def test_run_with_illegal_flow_id(self):
                 flow=flow,
                 avoid_duplicate_runs=True,
             )
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_run_with_illegal_flow_id_after_load(self):
@@ -1286,6 +1305,7 @@ def test_run_with_illegal_flow_id_1(self):
                 flow=flow_new,
                 avoid_duplicate_runs=True,
             )
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_run_with_illegal_flow_id_1_after_load(self):
@@ -1324,6 +1344,7 @@ def test_run_with_illegal_flow_id_1_after_load(self):
             expected_message_regex,
             loaded_run.publish,
         )
+
     @pytest.mark.sklearn()
     @unittest.skipIf(
         Version(sklearn.__version__) < Version("0.20"),
@@ -1553,6 +1574,7 @@ def test_get_runs_list_by_tag(self):
         # Don't remove the size restriction: this query is too expensive without
         runs = openml.runs.list_runs(tag="curves", size=2)
         assert len(runs) >= 1
+
     @pytest.mark.sklearn()
     @unittest.skipIf(
         Version(sklearn.__version__) < Version("0.20"),
@@ -1589,6 +1611,7 @@ def test_run_on_dataset_with_missing_labels_dataframe(self):
         for row in data_content:
             # repeat, fold, row_id, 6 confidences, prediction and correct label
             assert len(row) == 12
+
     @pytest.mark.sklearn()
     @unittest.skipIf(
         Version(sklearn.__version__) < Version("0.20"),
@@ -1642,6 +1665,7 @@ def test_get_uncached_run(self):
         openml.config.set_root_cache_directory(self.static_cache_dir)
         with pytest.raises(openml.exceptions.OpenMLCacheException):
             openml.runs.functions._get_cached_run(10)
+
     @pytest.mark.sklearn()
     @pytest.mark.test_server()
     def test_run_flow_on_task_downloaded_flow(self):
@@ -1741,6 +1765,9 @@ def test_format_prediction_task_regression(self):
         ignored_input = [0] * 5
         res = format_prediction(regression, *ignored_input)
         self.assertListEqual(res, [0] * 5)
+
+
+
 @unittest.skipIf(
     Version(sklearn.__version__) < Version("0.20"),
     reason="SimpleImputer doesn't handle mixed type DataFrame as input",
@@ -1836,6 +1863,8 @@ def test_delete_unknown_run(mock_delete, test_files_directory, test_api_key):
     run_url = f"{openml.config.TEST_SERVER_URL}/api/v1/xml/run/9999999"
     assert run_url == mock_delete.call_args.args[0]
     assert test_api_key == mock_delete.call_args.kwargs.get("params", {}).get("api_key")
+
+
 @pytest.mark.sklearn()
 @unittest.skipIf(
     Version(sklearn.__version__) < Version("0.21"),
@@ -1916,6 +1945,8 @@ def test__run_task_get_arffcontent_2(parallel_mock):
         decimal=2,
         err_msg="Observed performance scores deviate from expected ones.",
     )
+
+
 @pytest.mark.sklearn()
 @unittest.skipIf(
     Version(sklearn.__version__) < Version("0.21"),
@@ -1985,7 +2016,6 @@ def test_joblib_backends(parallel_mock, n_jobs, backend, call_count):
         n_jobs=n_jobs,
     )
     from openml_sklearn import SklearnExtension
-
     extension = SklearnExtension()
     with parallel_backend(backend, n_jobs=n_jobs):
         res = openml.runs.functions._run_task_get_arffcontent(
@@ -2002,4 +2032,4 @@ def test_joblib_backends(parallel_mock, n_jobs, backend, call_count):
     # *_time_millis_* not recorded when n_jobs = -1
     assert len(res[2]["predictive_accuracy"][0]) == 10
     assert len(res[3]["predictive_accuracy"][0]) == 10
-    assert parallel_mock.call_count == call_count
+    assert parallel_mock.call_count == call_count
