# License: BSD 3-Clause
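"""Tests for creating, storing, reloading, and publishing OpenML runs."""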
from __future__ import annotations

import os
import random
from time import time

import numpy as np
import pytest
import xmltodict
from openml_sklearn import SklearnExtension
from sklearn.base import clone
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier

import openml
from openml import OpenMLRun
from openml.testing import SimpleImputer, TestBase


class TestRun(TestBase):
    # Splitting not helpful, these tests don't rely on the server and take
    # less than 1 second
    @pytest.mark.test_server()
    def test_tagging(self):
        runs = openml.runs.list_runs(size=1)
        assert not runs.empty, "Test server state is incorrect"
        run_id = runs["run_id"].iloc[0]
        run = openml.runs.get_run(run_id)

        # Tags can be at most 64 alphanumeric (+ underscore) chars
        unique_indicator = str(time()).replace(".", "")
        tag = f"test_tag_TestRun_{unique_indicator}"

        runs = openml.runs.list_runs(tag=tag)
        assert len(runs) == 0

        run.push_tag(tag)
        runs = openml.runs.list_runs(tag=tag)
        assert len(runs) == 1
        assert run_id in runs["run_id"]

        run.remove_tag(tag)
        runs = openml.runs.list_runs(tag=tag)
        assert len(runs) == 0
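
    # The helpers below compare two run objects. Prediction data is compared
    # column-wise: numeric columns approximately, string columns exactly.
    # The stored prediction table typically carries repeat/fold/row_id columns,
    # the prediction, and the ground truth (under either "truth" or "correct");
    # see assert_run_prediction_data further down.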

    @staticmethod
    def _test_prediction_data_equal(run, run_prime):
        # Determine which attributes are numeric and which are not
        num_cols = np.array(
            [d_type == "NUMERIC" for _, d_type in run._generate_arff_dict()["attributes"]],
        )

        # Get run data consistently
        # (for a run fetched from the server, .data_content does not exist)
        run_data_content = run.predictions.values
        run_prime_data_content = run_prime.predictions.values

        # Assert numeric and string parts separately
        numeric_part = np.array(run_data_content[:, num_cols], dtype=float)
        numeric_part_prime = np.array(run_prime_data_content[:, num_cols], dtype=float)
        string_part = run_data_content[:, ~num_cols]
        string_part_prime = run_prime_data_content[:, ~num_cols]
        np.testing.assert_array_almost_equal(numeric_part, numeric_part_prime)
        np.testing.assert_array_equal(string_part, string_part_prime)

    def _test_run_obj_equals(self, run, run_prime):
        for dictionary in ["evaluations", "fold_evaluations", "sample_evaluations"]:
            if getattr(run, dictionary) is not None:
                self.assertDictEqual(getattr(run, dictionary), getattr(run_prime, dictionary))
            else:
                # should be None or empty
                other = getattr(run_prime, dictionary)
                if other is not None:
                    self.assertDictEqual(other, {})
        assert run._to_xml() == run_prime._to_xml()

        self._test_prediction_data_equal(run, run_prime)

        # Test trace
        run_trace_content = run.trace.trace_to_arff()["data"] if run.trace is not None else None
        if run_prime.trace is not None:
            run_prime_trace_content = run_prime.trace.trace_to_arff()["data"]
        else:
            run_prime_trace_content = None

        if run_trace_content is not None:

            def _check_array(array, type_):
                for line in array:
                    for entry in line:
                        assert isinstance(entry, type_)

            int_part = [line[:3] for line in run_trace_content]
            _check_array(int_part, int)
            int_part_prime = [line[:3] for line in run_prime_trace_content]
            _check_array(int_part_prime, int)

            float_part = np.array(
                np.array(run_trace_content)[:, 3:4],
                dtype=float,
            )
            float_part_prime = np.array(
                np.array(run_prime_trace_content)[:, 3:4],
                dtype=float,
            )
            bool_part = [line[4] for line in run_trace_content]
            bool_part_prime = [line[4] for line in run_prime_trace_content]
            for bp, bpp in zip(bool_part, bool_part_prime):
                assert bp in ["true", "false"]
                assert bpp in ["true", "false"]
            string_part = np.array(run_trace_content)[:, 5:]
            string_part_prime = np.array(run_prime_trace_content)[:, 5:]

            np.testing.assert_array_almost_equal(int_part, int_part_prime)
            np.testing.assert_array_almost_equal(float_part, float_part_prime)
            assert bool_part == bool_part_prime
            np.testing.assert_array_equal(string_part, string_part_prime)
        else:
            assert run_prime_trace_content is None
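
    # A minimal sketch of the filesystem round-trip the next tests exercise
    # (cache_path is any fresh directory; model and task as in the tests below):
    #
    #   run = openml.runs.run_model_on_task(model=model, task=task)
    #   run.to_filesystem(cache_path)
    #   restored = openml.runs.OpenMLRun.from_filesystem(cache_path)
    #   restored.publish()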

    @pytest.mark.sklearn()
    @pytest.mark.test_server()
    def test_to_from_filesystem_vanilla(self):
        model = Pipeline(
            [
                ("imputer", SimpleImputer(strategy="mean")),
                ("classifier", DecisionTreeClassifier(max_depth=1)),
            ],
        )
        task = openml.tasks.get_task(119)  # diabetes; crossvalidation
        run = openml.runs.run_model_on_task(
            model=model,
            task=task,
            add_local_measures=False,
            upload_flow=True,
        )

        cache_path = os.path.join(
            self.workdir,
            "runs",
            str(random.getrandbits(128)),
        )
        run.to_filesystem(cache_path)

        run_prime = openml.runs.OpenMLRun.from_filesystem(cache_path)
        # The flow has been uploaded to the server, so only the reference flow_id
        # should be present
        assert run_prime.flow_id is not None
        assert run_prime.flow is None
        self._test_run_obj_equals(run, run_prime)
        run_prime.publish()
        TestBase._mark_entity_for_removal("run", run_prime.run_id)
        TestBase.logger.info(
            f"collected from {__file__.split('/')[-1]}: {run_prime.run_id}",
        )

    @pytest.mark.sklearn()
    @pytest.mark.flaky()
    @pytest.mark.test_server()
    def test_to_from_filesystem_search(self):
        model = Pipeline(
            [
                ("imputer", SimpleImputer(strategy="mean")),
                ("classifier", DecisionTreeClassifier(max_depth=1)),
            ],
        )
        model = GridSearchCV(
            estimator=model,
            param_grid={
                "classifier__max_depth": [1, 2, 3, 4, 5],
                "imputer__strategy": ["mean", "median"],
            },
        )
        task = openml.tasks.get_task(119)  # diabetes; crossvalidation
        run = openml.runs.run_model_on_task(
            model=model,
            task=task,
            add_local_measures=False,
        )

        cache_path = os.path.join(self.workdir, "runs", str(random.getrandbits(128)))
        run.to_filesystem(cache_path)

        run_prime = openml.runs.OpenMLRun.from_filesystem(cache_path)
        self._test_run_obj_equals(run, run_prime)
        run_prime.publish()
        TestBase._mark_entity_for_removal("run", run_prime.run_id)
        TestBase.logger.info(
            f"collected from {__file__.split('/')[-1]}: {run_prime.run_id}",
        )

    @pytest.mark.sklearn()
    @pytest.mark.test_server()
    def test_to_from_filesystem_no_model(self):
        model = Pipeline(
            [("imputer", SimpleImputer(strategy="mean")), ("classifier", DummyClassifier())],
        )
        task = openml.tasks.get_task(119)  # diabetes; crossvalidation
        run = openml.runs.run_model_on_task(model=model, task=task, add_local_measures=False)

        cache_path = os.path.join(self.workdir, "runs", str(random.getrandbits(128)))
        run.to_filesystem(cache_path, store_model=False)

        # Obtain the run from the filesystem
        openml.runs.OpenMLRun.from_filesystem(cache_path, expect_model=False)

        # Assert that the default behaviour is to raise an error
        with self.assertRaises(ValueError, msg="Could not find model.pkl"):
            openml.runs.OpenMLRun.from_filesystem(cache_path)

    @staticmethod
    def _cat_col_selector(X):
        return X.select_dtypes(include=["object", "category"]).columns
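
    # The fixtures below build one classification and one regression setup.
    # Both pipelines one-hot encode the categorical columns picked out by
    # _cat_col_selector and impute missing values, so the same preprocessing
    # works for either task.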

    @staticmethod
    def _get_models_tasks_for_tests():
        from sklearn.compose import ColumnTransformer
        from sklearn.preprocessing import OneHotEncoder

        basic_preprocessing = [
            (
                "cat_handling",
                ColumnTransformer(
                    transformers=[
                        (
                            "cat",
                            OneHotEncoder(handle_unknown="ignore"),
                            TestRun._cat_col_selector,
                        )
                    ],
                    remainder="passthrough",
                ),
            ),
            ("imp", SimpleImputer()),
        ]
        model_clf = Pipeline(
            [
                *basic_preprocessing,
                ("classifier", DummyClassifier(strategy="prior")),
            ],
        )
        model_reg = Pipeline(
            [
                *basic_preprocessing,
                (
                    "regressor",
                    # LR because dummy does not produce enough float-like values
                    LinearRegression(),
                ),
            ],
        )

        task_clf = openml.tasks.get_task(119)  # diabetes; hold out validation
        task_reg = openml.tasks.get_task(733)  # quake; crossvalidation

        return [(model_clf, task_clf), (model_reg, task_reg)]
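
    # The assertion helper below refits the model on each training fold and
    # checks that its predictions and the ground truth match what the run
    # stored for that fold.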

    @staticmethod
    def assert_run_prediction_data(task, run, model):
        # -- Get y_pred and y_true as it should be stored in the run
        n_repeats, n_folds, n_samples = task.get_split_dimensions()
        if (n_repeats > 1) or (n_samples > 1):
            raise ValueError("Test does not support this task type's split dimensions.")

        X, y = task.get_X_and_y()

        # Check correctness of y_true and y_pred in run
        for fold_id in range(n_folds):
            # Get data for fold
            _, test_indices = task.get_train_test_split_indices(repeat=0, fold=fold_id, sample=0)
            train_mask = np.full(len(X), True)
            train_mask[test_indices] = False

            # Get train / test
            X_train = X[train_mask]
            y_train = y[train_mask]
            X_test = X[~train_mask]
            y_test = y[~train_mask]

            # Get y_pred
            y_pred = model.fit(X_train, y_train).predict(X_test)

            # Get stored data for fold
            saved_fold_data = run.predictions[run.predictions["fold"] == fold_id].sort_values(
                by="row_id",
            )
            saved_y_pred = saved_fold_data["prediction"].values
            gt_key = "truth" if "truth" in list(saved_fold_data) else "correct"
            saved_y_test = saved_fold_data[gt_key].values

            assert_method = np.testing.assert_array_almost_equal
            if task.task_type == "Supervised Classification":
                assert_method = np.testing.assert_array_equal
                y_test = y_test.values

            # Assert correctness
            assert_method(y_pred, saved_y_pred)
            assert_method(y_test, saved_y_test)

    @pytest.mark.sklearn()
    @pytest.mark.test_server()
    def test_publish_with_local_loaded_flow(self):
        """
        Publish a run tied to a local flow after it has first been saved to
        and loaded from disk.
        """
        extension = SklearnExtension()

        for model, task in self._get_models_tasks_for_tests():
            # Make sure the flow does not exist on the server yet.
            flow = extension.model_to_flow(model)
            self._add_sentinel_to_flow_name(flow)
            assert not openml.flows.flow_exists(flow.name, flow.external_version)

            run = openml.runs.run_flow_on_task(
                flow=flow,
                task=task,
                add_local_measures=False,
                upload_flow=False,
            )

            # Make sure that the flow has not been uploaded as requested.
            assert not openml.flows.flow_exists(flow.name, flow.external_version)

            # Make sure that the prediction data stored in the run is correct.
            self.assert_run_prediction_data(task, run, clone(model))

            cache_path = os.path.join(self.workdir, "runs", str(random.getrandbits(128)))
            run.to_filesystem(cache_path)
            # obtain run from filesystem
            loaded_run = openml.runs.OpenMLRun.from_filesystem(cache_path)

            loaded_run.publish()
            # Clean up
            TestBase._mark_entity_for_removal("run", loaded_run.run_id)
            TestBase.logger.info(
                f"collected from {__file__.split('/')[-1]}: {loaded_run.run_id}",
            )

            # make sure the flow is published as part of publishing the run.
            assert openml.flows.flow_exists(flow.name, flow.external_version)
            openml.runs.get_run(loaded_run.run_id)

    @pytest.mark.sklearn()
    @pytest.mark.test_server()
    @pytest.mark.skip(reason="https://github.com/openml/openml-python/issues/1586")
    def test_offline_and_online_run_identical(self):
        extension = SklearnExtension()

        for model, task in self._get_models_tasks_for_tests():
            # Make sure the flow does not exist on the server yet.
            flow = extension.model_to_flow(model)
            self._add_sentinel_to_flow_name(flow)
            assert not openml.flows.flow_exists(flow.name, flow.external_version)

            run = openml.runs.run_flow_on_task(
                flow=flow,
                task=task,
                add_local_measures=False,
                upload_flow=False,
            )

            # Make sure that the flow has not been uploaded as requested.
            assert not openml.flows.flow_exists(flow.name, flow.external_version)

            # Load from filesystem
            cache_path = os.path.join(self.workdir, "runs", str(random.getrandbits(128)))
            run.to_filesystem(cache_path)
            loaded_run = openml.runs.OpenMLRun.from_filesystem(cache_path)

            # Assert identical for offline - offline
            self._test_run_obj_equals(run, loaded_run)

            # Publish and test for offline - online
            run.publish()
            assert openml.flows.flow_exists(flow.name, flow.external_version)
            try:
                online_run = openml.runs.get_run(run.run_id, ignore_cache=True)
                self._test_prediction_data_equal(run, online_run)
            finally:
                # Clean up
                TestBase._mark_entity_for_removal("run", run.run_id)
                TestBase.logger.info(
                    f"collected from {__file__.split('/')[-1]}: {run.run_id}",
                )
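
    # _to_xml() serializes the run description; the check below expects the
    # setup string to round-trip through an <oml:setup_string> element, roughly:
    #
    #   <oml:run ...>
    #     <oml:setup_string>setup-string</oml:setup_string>
    #     ...
    #   </oml:run>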

    def test_run_setup_string_included_in_xml(self):
        SETUP_STRING = "setup-string"
        run = OpenMLRun(
            task_id=0,
            flow_id=None,  # if not None, flow parameters are required.
            dataset_id=0,
            setup_string=SETUP_STRING,
        )
        xml = run._to_xml()
        run_dict = xmltodict.parse(xml)["oml:run"]
        assert "oml:setup_string" in run_dict
        assert run_dict["oml:setup_string"] == SETUP_STRING

        recreated_run = openml.runs.functions._create_run_from_xml(xml, from_server=False)
        assert recreated_run.setup_string == SETUP_STRING