# License: BSD 3-Clause
# ruff: noqa: PLR0913
from __future__ import annotations
import logging
import os
import warnings
from collections import OrderedDict
from functools import partial
from pathlib import Path
from pyexpat import ExpatError
from typing import TYPE_CHECKING, Any, Literal
import arff
import minio.error
import numpy as np
import pandas as pd
import urllib3
import xmltodict
from scipy.sparse import coo_matrix
import openml
import openml._api_calls
import openml.utils
from openml.exceptions import (
OpenMLHashException,
OpenMLPrivateDatasetError,
OpenMLServerError,
OpenMLServerException,
)
from openml.utils import (
_create_cache_directory_for_id,
_get_cache_dir_for_id,
_remove_cache_dir_for_id,
)
from .dataset import OpenMLDataset
if TYPE_CHECKING:
import scipy
DATASETS_CACHE_DIR_NAME = "datasets"
logger = logging.getLogger(__name__)
NO_ACCESS_GRANTED_ERRCODE = 112
############################################################################
# Local getters/accessors to the cache directory
def _get_cache_directory(dataset: OpenMLDataset) -> Path:
"""Creates and returns the cache directory of the OpenMLDataset."""
assert dataset.dataset_id is not None
return _create_cache_directory_for_id(DATASETS_CACHE_DIR_NAME, dataset.dataset_id)
def list_qualities() -> list[str]:
"""Return list of data qualities available.
The function performs an API call to retrieve the entire list of
data qualities that are computed on the datasets uploaded.
Returns
-------
list
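Examples
--------
Illustrative usage (requires network access to the OpenML server; the exact
set of qualities depends on the server configuration):
>>> qualities = openml.datasets.list_qualities()  # doctest: +SKIP
>>> "NumberOfInstances" in qualities  # doctest: +SKIP
True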
"""
api_call = "data/qualities/list"
xml_string = openml._api_calls._perform_api_call(api_call, "get")
qualities = xmltodict.parse(xml_string, force_list=("oml:quality"))
# Minimalistic check if the XML is useful
if "oml:data_qualities_list" not in qualities:
raise ValueError('Error in return XML, does not contain "oml:data_qualities_list"')
if not isinstance(qualities["oml:data_qualities_list"]["oml:quality"], list):
raise TypeError('Error in return XML, does not contain "oml:quality" as a list')
return qualities["oml:data_qualities_list"]["oml:quality"]
def list_datasets(
data_id: list[int] | None = None,
offset: int | None = None,
size: int | None = None,
status: str | None = None,
tag: str | None = None,
data_name: str | None = None,
data_version: int | None = None,
number_instances: int | str | None = None,
number_features: int | str | None = None,
number_classes: int | str | None = None,
number_missing_values: int | str | None = None,
) -> pd.DataFrame:
"""Return a dataframe of all dataset which are on OpenML.
Supports large amount of results.
Parameters
----------
data_id : list, optional
A list of data ids, to specify which datasets should be
listed
offset : int, optional
The number of datasets to skip, starting from the first.
size : int, optional
The maximum number of datasets to show.
status : str, optional
Should be one of {active, in_preparation, deactivated}. By
default only active datasets are returned, but datasets with
another status can also be requested.
tag : str, optional
data_name : str, optional
data_version : int, optional
number_instances : int | str, optional
number_features : int | str, optional
number_classes : int | str, optional
number_missing_values : int | str, optional
Returns
-------
datasets: dataframe
Each row maps to a dataset.
The columns contain the following information:
- dataset id
- name
- format
- status
If qualities are calculated for the dataset, some of
these are also included as columns.
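Examples
--------
Illustrative usage (requires network access to the OpenML server; which
additional quality columns appear depends on the server's computed qualities):
>>> datasets = openml.datasets.list_datasets(size=100, status="active")  # doctest: +SKIP
>>> datasets[["did", "name", "version", "status"]].head()  # doctest: +SKIP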
"""
listing_call = partial(
_list_datasets,
data_id=data_id,
status=status,
tag=tag,
data_name=data_name,
data_version=data_version,
number_instances=number_instances,
number_features=number_features,
number_classes=number_classes,
number_missing_values=number_missing_values,
)
batches = openml.utils._list_all(listing_call, offset=offset, limit=size)
if len(batches) == 0:
return pd.DataFrame()
return pd.concat(batches)
def _list_datasets(
limit: int,
offset: int,
*,
data_id: list[int] | None = None,
**kwargs: Any,
) -> pd.DataFrame:
"""
Perform an API call to return a list of all datasets.
List-valued arguments are handled separately from the single-value
filters, which are passed through ``kwargs``.
Parameters
----------
limit : int
The maximum number of datasets to show.
offset : int
The number of datasets to skip, starting from the first.
data_id : list, optional
kwargs : dict, optional
Legal filter operators (keys in the dict):
tag, status, data_name, data_version, number_instances,
number_features, number_classes, number_missing_values.
Returns
-------
datasets : dataframe
"""
api_call = "data/list"
if limit is not None:
api_call += f"/limit/{limit}"
if offset is not None:
api_call += f"/offset/{offset}"
if kwargs is not None:
for operator, value in kwargs.items():
if value is not None:
api_call += f"/{operator}/{value}"
if data_id is not None:
api_call += f"/data_id/{','.join([str(int(i)) for i in data_id])}"
return __list_datasets(api_call=api_call)
def __list_datasets(api_call: str) -> pd.DataFrame:
xml_string = openml._api_calls._perform_api_call(api_call, "get")
datasets_dict = xmltodict.parse(xml_string, force_list=("oml:dataset",))
# Minimalistic check if the XML is useful
assert isinstance(datasets_dict["oml:data"]["oml:dataset"], list), type(
datasets_dict["oml:data"],
)
assert datasets_dict["oml:data"]["@xmlns:oml"] == "http://openml.org/openml", datasets_dict[
"oml:data"
]["@xmlns:oml"]
datasets = {}
for dataset_ in datasets_dict["oml:data"]["oml:dataset"]:
ignore_attribute = ["oml:file_id", "oml:quality"]
dataset = {
k.replace("oml:", ""): v for (k, v) in dataset_.items() if k not in ignore_attribute
}
dataset["did"] = int(dataset["did"])
dataset["version"] = int(dataset["version"])
# The number of qualities can range from 0 to infinity
for quality in dataset_.get("oml:quality", []):
try:
dataset[quality["@name"]] = int(quality["#text"])
except ValueError:
dataset[quality["@name"]] = float(quality["#text"])
datasets[dataset["did"]] = dataset
return pd.DataFrame.from_dict(datasets, orient="index").astype(
{
"did": int,
"version": int,
"status": pd.CategoricalDtype(["active", "deactivated", "in_preparation"]),
}
)
def _expand_parameter(parameter: str | list[str] | None) -> list[str]:
expanded_parameter = []
if isinstance(parameter, str):
expanded_parameter = [x.strip() for x in parameter.split(",")]
elif isinstance(parameter, list):
expanded_parameter = parameter
return expanded_parameter
def _validated_data_attributes(
attributes: list[str],
data_attributes: list[tuple[str, Any]],
parameter_name: str,
) -> None:
for attribute_ in attributes:
is_attribute_a_data_attribute = any(dattr[0] == attribute_ for dattr in data_attributes)
if not is_attribute_a_data_attribute:
raise ValueError(
f"all attribute of '{parameter_name}' should be one of the data attribute. "
f" Got '{attribute_}' while candidates are"
f" {[dattr[0] for dattr in data_attributes]}.",
)
def check_datasets_active(
dataset_ids: list[int],
raise_error_if_not_exist: bool = True, # noqa: FBT002
) -> dict[int, bool]:
"""
Check if the dataset ids provided are active.
Raises an error if a dataset_id in the given list
of dataset_ids does not exist on the server and
`raise_error_if_not_exist` is set to True (default).
Parameters
----------
dataset_ids : List[int]
A list of integers representing dataset ids.
raise_error_if_not_exist : bool (default=True)
If True, raise an error when one or more of the given dataset ids
do not exist on the server.
Returns
-------
dict
A dictionary with items {did: bool}
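Examples
--------
Illustrative usage (requires network access; dataset ids 2 and 61 are
examples from the public OpenML server, and their status may change):
>>> openml.datasets.check_datasets_active([2, 61])  # doctest: +SKIP
{2: True, 61: True}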
"""
datasets = list_datasets(status="all", data_id=dataset_ids)
missing = set(dataset_ids) - set(datasets.index)
if raise_error_if_not_exist and missing:
missing_str = ", ".join(str(did) for did in missing)
raise ValueError(f"Could not find dataset(s) {missing_str} in OpenML dataset list.")
mask = datasets["status"] == "active"
return dict(mask)
def _name_to_id(
dataset_name: str,
version: int | None = None,
error_if_multiple: bool = False, # noqa: FBT002
) -> int:
"""Attempt to find the dataset id of the dataset with the given name.
If multiple datasets with the name exist, and ``error_if_multiple`` is ``False``,
then return the least recent still active dataset.
Raises an error if no dataset with the name is found.
Raises an error if a version is specified but it could not be found.
Parameters
----------
dataset_name : str
The name of the dataset for which to find its id.
version : int, optional
Version to retrieve. If not specified, the oldest active version is returned.
error_if_multiple : bool (default=False)
If `False`, if multiple datasets match, return the least recent active dataset.
If `True`, if multiple datasets match, raise an error.
Returns
-------
int
The id of the dataset.
"""
status = None if version is not None else "active"
candidates = list_datasets(
data_name=dataset_name,
status=status,
data_version=version,
)
if error_if_multiple and len(candidates) > 1:
msg = f"Multiple active datasets exist with name '{dataset_name}'."
raise ValueError(msg)
if candidates.empty:
no_dataset_for_name = f"No active datasets exist with name '{dataset_name}'"
and_version = f" and version '{version}'." if version is not None else "."
raise RuntimeError(no_dataset_for_name + and_version)
# Dataset ids are chronological so we can just sort based on ids (instead of version)
return candidates["did"].min() # type: ignore
def get_datasets(
dataset_ids: list[str | int],
download_data: bool = False, # noqa: FBT002
download_qualities: bool = False, # noqa: FBT002
) -> list[OpenMLDataset]:
"""Download datasets.
This function iterates :meth:`openml.datasets.get_dataset`.
Parameters
----------
dataset_ids : iterable
Integers or strings representing dataset ids or dataset names.
If dataset names are specified, the least recent still active dataset version is returned.
download_data : bool, optional
If True, also download the data file. Beware that some datasets are large and it might
make the operation noticeably slower. Metadata is also still retrieved.
If False, create the OpenMLDataset and only populate it with the metadata.
The data may later be retrieved through the `OpenMLDataset.get_data` method.
download_qualities : bool, optional (default=False)
If True, also download the qualities.xml file. If False, skip downloading qualities.xml.
Returns
-------
datasets : list of datasets
A list of dataset objects.
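Examples
--------
Illustrative usage (requires network access; ids 2 and 61 refer to datasets
on the public OpenML server):
>>> datasets = openml.datasets.get_datasets([2, 61], download_data=False)  # doctest: +SKIP
>>> [d.name for d in datasets]  # doctest: +SKIP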
"""
datasets = []
for dataset_id in dataset_ids:
datasets.append(
get_dataset(dataset_id, download_data, download_qualities=download_qualities),
)
return datasets
@openml.utils.thread_safe_if_oslo_installed
def get_dataset( # noqa: C901, PLR0912
dataset_id: int | str,
download_data: bool = False, # noqa: FBT002
version: int | None = None,
error_if_multiple: bool = False, # noqa: FBT002
cache_format: Literal["pickle", "feather"] = "pickle",
download_qualities: bool = False, # noqa: FBT002
download_features_meta_data: bool = False, # noqa: FBT002
download_all_files: bool = False, # noqa: FBT002
force_refresh_cache: bool = False, # noqa: FBT002
) -> OpenMLDataset:
"""Download the OpenML dataset representation, optionally also download actual data file.
This function is by default NOT thread/multiprocessing safe, as this function uses caching.
A check will be performed to determine if the information has previously been downloaded to a
cache, and if so be loaded from disk instead of retrieved from the server.
To make this function thread safe, you can install the python package ``oslo.concurrency``.
If ``oslo.concurrency`` is installed `get_dataset` becomes thread safe.
Alternatively, to make this function thread/multiprocessing safe initialize the cache first by
calling `get_dataset(args)` once before calling `get_dataset(args)` many times in parallel.
This will initialize the cache and later calls will use the cache in a thread/multiprocessing
safe way.
If dataset is retrieved by name, a version may be specified.
If no version is specified and multiple versions of the dataset exist,
the earliest version of the dataset that is still active will be returned.
If no version is specified, multiple versions of the dataset exist and
``error_if_multiple`` is set to ``True``, this function will raise an exception.
Parameters
----------
dataset_id : int or str
Dataset ID (integer) or dataset name (string) of the dataset to download.
download_data : bool (default=False)
If True, also download the data file. Beware that some datasets are large and it might
make the operation noticeably slower. Metadata is also still retrieved.
If False, create the OpenMLDataset and only populate it with the metadata.
The data may later be retrieved through the `OpenMLDataset.get_data` method.
version : int, optional (default=None)
Specifies the version if `dataset_id` is specified by name.
If no version is specified, retrieve the least recent still active version.
error_if_multiple : bool (default=False)
If ``True`` raise an error if multiple datasets are found with matching criteria.
cache_format : str (default='pickle') in {'pickle', 'feather'}
Format for caching the dataset - may be feather or pickle
Note that the default 'pickle' option may load slower than feather when
the number of rows is very high.
download_qualities : bool (default=False)
Option to download 'qualities' meta-data in addition to the minimal dataset description.
If True, download and cache the qualities file.
If False, create the OpenMLDataset without qualities metadata. The data may later be added
to the OpenMLDataset through the `OpenMLDataset.load_metadata(qualities=True)` method.
download_features_meta_data : bool (default=False)
Option to download 'features' meta-data in addition to the minimal dataset description.
If True, download and cache the features file.
If False, create the OpenMLDataset without features metadata. The data may later be added
to the OpenMLDataset through the `OpenMLDataset.load_metadata(features=True)` method.
download_all_files: bool (default=False)
EXPERIMENTAL. Download all files related to the dataset that reside on the server.
Useful for datasets which refer to auxiliary files (e.g., meta-album).
force_refresh_cache : bool (default=False)
Force the cache to be refreshed by deleting the cache directory and re-downloading the data.
Note that if `force_refresh_cache` is True, `get_dataset` is NOT thread/multiprocessing safe,
because deleting and re-creating the cache introduces a race condition (as is the case for
cache usage in general).
Returns
-------
dataset : :class:`openml.OpenMLDataset`
The downloaded dataset.
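Examples
--------
Illustrative usage (requires network access; id 61 refers to the 'iris'
dataset on the public OpenML server):
>>> dataset = openml.datasets.get_dataset(61)  # doctest: +SKIP
>>> X, y, categorical_indicator, attribute_names = dataset.get_data(
...     target=dataset.default_target_attribute
... )  # doctest: +SKIP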
"""
if download_all_files:
warnings.warn(
"``download_all_files`` is experimental and is likely to break with new releases.",
FutureWarning,
stacklevel=2,
)
if cache_format not in ["feather", "pickle"]:
raise ValueError(
"cache_format must be one of 'feather' or 'pickle. "
f"Invalid format specified: {cache_format}",
)
if isinstance(dataset_id, str):
try:
dataset_id = int(dataset_id)
except ValueError:
dataset_id = _name_to_id(dataset_id, version, error_if_multiple) # type: ignore
elif not isinstance(dataset_id, int):
raise TypeError(
f"`dataset_id` must be one of `str` or `int`, not {type(dataset_id)}.",
)
if force_refresh_cache:
did_cache_dir = _get_cache_dir_for_id(DATASETS_CACHE_DIR_NAME, dataset_id)
if did_cache_dir.exists():
_remove_cache_dir_for_id(DATASETS_CACHE_DIR_NAME, did_cache_dir)
did_cache_dir = _create_cache_directory_for_id(
DATASETS_CACHE_DIR_NAME,
dataset_id,
)
remove_dataset_cache = True
try:
description = _get_dataset_description(did_cache_dir, dataset_id)
features_file = None
qualities_file = None
if download_features_meta_data:
features_file = _get_dataset_features_file(did_cache_dir, dataset_id)
if download_qualities:
qualities_file = _get_dataset_qualities_file(did_cache_dir, dataset_id)
parquet_file = None
skip_parquet = (
os.environ.get(openml.config.OPENML_SKIP_PARQUET_ENV_VAR, "false").casefold() == "true"
)
download_parquet = "oml:parquet_url" in description and not skip_parquet
if download_parquet and (download_data or download_all_files):
try:
parquet_file = _get_dataset_parquet(
description,
download_all_files=download_all_files,
)
except urllib3.exceptions.MaxRetryError:
parquet_file = None
arff_file = None
if parquet_file is None and download_data:
if download_parquet:
logger.warning("Failed to download parquet, fallback on ARFF.")
arff_file = _get_dataset_arff(description)
remove_dataset_cache = False
except OpenMLServerException as e:
# if there was an exception
# check if the user had access to the dataset
if e.code == NO_ACCESS_GRANTED_ERRCODE:
raise OpenMLPrivateDatasetError(e.message) from None
raise e
finally:
if remove_dataset_cache:
_remove_cache_dir_for_id(DATASETS_CACHE_DIR_NAME, did_cache_dir)
return _create_dataset_from_description(
description,
features_file,
qualities_file,
arff_file,
parquet_file,
cache_format,
)
def attributes_arff_from_df(df: pd.DataFrame) -> list[tuple[str, list[str] | str]]:
"""Describe attributes of the dataframe according to ARFF specification.
Parameters
----------
df : DataFrame, shape (n_samples, n_features)
The dataframe containing the data set.
Returns
-------
attributes_arff : list[tuple[str, str | list[str]]]
The data set attributes as required by the ARFF format.
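Examples
--------
Illustrative sketch (the exact order of the categories depends on pandas'
dtype inference):
>>> import pandas as pd
>>> df = pd.DataFrame(
...     {"age": [23, 45], "label": pd.Series(["yes", "no"], dtype="category")}
... )
>>> attributes_arff_from_df(df)  # doctest: +SKIP
[('age', 'INTEGER'), ('label', ['no', 'yes'])]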
"""
PD_DTYPES_TO_ARFF_DTYPE = {"integer": "INTEGER", "floating": "REAL", "string": "STRING"}
attributes_arff: list[tuple[str, list[str] | str]] = []
if not all(isinstance(column_name, str) for column_name in df.columns):
logger.warning("Converting non-str column names to str.")
df.columns = [str(column_name) for column_name in df.columns]
for column_name in df:
# skipna=True does not properly infer the dtype. The NA values are
# dropped before the inference instead.
column_dtype = pd.api.types.infer_dtype(df[column_name].dropna(), skipna=False)
if column_dtype == "categorical":
# for a categorical feature, arff expects a list of strings. However, a
# categorical column can contain mixed types, so we raise an error
# asking the user to convert all entries to strings.
categories = df[column_name].cat.categories
categories_dtype = pd.api.types.infer_dtype(categories)
if categories_dtype not in ("string", "unicode"):
raise ValueError(
f"The column '{column_name}' of the dataframe is of "
"'category' dtype. Therefore, all values in "
"this columns should be string. Please "
"convert the entries which are not string. "
f"Got {categories_dtype} dtype in this column.",
)
attributes_arff.append((column_name, categories.tolist()))
elif column_dtype == "boolean":
# booleans are encoded as categorical.
attributes_arff.append((column_name, ["True", "False"]))
elif column_dtype in PD_DTYPES_TO_ARFF_DTYPE:
attributes_arff.append((column_name, PD_DTYPES_TO_ARFF_DTYPE[column_dtype]))
else:
raise ValueError(
f"The dtype '{column_dtype}' of the column '{column_name}' is not "
"currently supported by liac-arff. Supported "
"dtypes are categorical, string, integer, "
"floating, and boolean.",
)
return attributes_arff
def create_dataset( # noqa: C901, PLR0912, PLR0915
name: str,
description: str | None,
creator: str | None,
contributor: str | None,
collection_date: str | None,
language: str | None,
licence: str | None,
# TODO(eddiebergman): Docstring says `type` but I don't know what this is other than strings
# Edit: Found it could also be like ["True", "False"]
attributes: list[tuple[str, str | list[str]]] | dict[str, str | list[str]] | Literal["auto"],
data: pd.DataFrame | np.ndarray | scipy.sparse.coo_matrix,
default_target_attribute: str | None,
ignore_attribute: str | list[str] | None,
citation: str,
row_id_attribute: str | None = None,
original_data_url: str | None = None,
paper_url: str | None = None,
update_comment: str | None = None,
version_label: str | None = None,
) -> OpenMLDataset:
"""Create a dataset.
This function creates an OpenMLDataset object.
The OpenMLDataset object contains information related to the dataset
and the actual data file.
Parameters
----------
name : str
Name of the dataset.
description : str
Description of the dataset.
creator : str
The person who created the dataset.
contributor : str
People who contributed to the current version of the dataset.
collection_date : str
The date the data was originally collected, given by the uploader.
language : str
Language in which the data is represented.
Starts with 1 upper case letter, rest lower case, e.g. 'English'.
licence : str
License of the data.
attributes : list, dict, or 'auto'
A list of tuples. Each tuple consists of the attribute name and type.
If passing a pandas DataFrame, the attributes can be automatically
inferred by passing ``'auto'``. Specific attributes can be manually
specified by passing a dictionary where the key is the name of the
attribute and the value is the data type of the attribute.
data : ndarray, list, dataframe, coo_matrix, shape (n_samples, n_features)
An array that contains both the attributes and the targets. When
providing a dataframe, the attribute names and type can be inferred by
passing ``attributes='auto'``.
The target feature is indicated as meta-data of the dataset.
default_target_attribute : str
The default target attribute, if it exists. Use ``None`` for unsupervised datasets
(e.g. clustering, anomaly detection) where no natural target column exists.
Can have multiple values, comma separated.
ignore_attribute : str | list
Attributes that should be excluded in modelling,
such as identifiers and indexes.
Can have multiple values, comma separated.
citation : str
Reference(s) that should be cited when building on this data.
version_label : str, optional
Version label provided by user.
Can be a date, hash, or some other type of id.
row_id_attribute : str, optional
The attribute that represents the row-id column, if present in the
dataset. If ``data`` is a dataframe and ``row_id_attribute`` is not
specified, the index of the dataframe will be used as the
``row_id_attribute``. If the name of the index is ``None``, it will
be discarded.
.. versionadded:: 0.8
Inference of ``row_id_attribute`` from a dataframe.
original_data_url : str, optional
For derived data, the url to the original dataset.
paper_url : str, optional
Link to a paper describing the dataset.
update_comment : str, optional
An explanation for when the dataset is uploaded.
Returns
-------
class:`openml.OpenMLDataset`
Dataset description.
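Examples
--------
A minimal, illustrative sketch (the data and metadata below are made up;
publishing the result additionally requires ``dataset.publish()`` and a
configured API key):
>>> import pandas as pd
>>> df = pd.DataFrame({"sepal_length": [5.1, 4.9], "target": ["a", "b"]})
>>> dataset = openml.datasets.create_dataset(
...     name="toy_example",
...     description="A toy example dataset.",
...     creator=None,
...     contributor=None,
...     collection_date=None,
...     language="English",
...     licence="CC0",
...     attributes="auto",
...     data=df,
...     default_target_attribute="target",
...     ignore_attribute=None,
...     citation="",
... )  # doctest: +SKIP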
"""
if isinstance(data, pd.DataFrame):
# infer the row id from the index of the dataset
if row_id_attribute is None:
row_id_attribute = data.index.name
# When calling data.values, the index will be skipped.
# We need to reset the index such that it is part of the data.
if data.index.name is not None:
data = data.reset_index()
if attributes == "auto" or isinstance(attributes, dict):
if not isinstance(data, pd.DataFrame):
raise ValueError(
"Automatically inferring attributes requires "
f"a pandas DataFrame. A {data!r} was given instead.",
)
# infer the type of data for each column of the DataFrame
attributes_ = attributes_arff_from_df(data)
if isinstance(attributes, dict):
# override the attributes which were specified by the user
for attr_idx in range(len(attributes_)):
attr_name = attributes_[attr_idx][0]
if attr_name in attributes:
attributes_[attr_idx] = (attr_name, attributes[attr_name])
else:
attributes_ = attributes
ignore_attributes = _expand_parameter(ignore_attribute)
_validated_data_attributes(ignore_attributes, attributes_, "ignore_attribute")
default_target_attributes = _expand_parameter(default_target_attribute)
_validated_data_attributes(default_target_attributes, attributes_, "default_target_attribute")
if row_id_attribute is not None:
is_row_id_an_attribute = any(attr[0] == row_id_attribute for attr in attributes_)
if not is_row_id_an_attribute:
raise ValueError(
"'row_id_attribute' should be one of the data attribute. "
f" Got '{row_id_attribute}' while candidates are"
f" {[attr[0] for attr in attributes_]}.",
)
if isinstance(data, pd.DataFrame):
if all(isinstance(dtype, pd.SparseDtype) for dtype in data.dtypes):
data = data.sparse.to_coo()
# liac-arff only supports COO matrices with sorted rows
row_idx_sorted = np.argsort(data.row) # type: ignore
data.row = data.row[row_idx_sorted] # type: ignore
data.col = data.col[row_idx_sorted] # type: ignore
data.data = data.data[row_idx_sorted] # type: ignore
else:
data = data.to_numpy()
data_format: Literal["arff", "sparse_arff"]
if isinstance(data, (list, np.ndarray)):
if isinstance(data[0], (list, np.ndarray)):
data_format = "arff"
elif isinstance(data[0], dict):
data_format = "sparse_arff"
else:
raise ValueError(
"When giving a list or a numpy.ndarray, "
"they should contain a list/ numpy.ndarray "
"for dense data or a dictionary for sparse "
f"data. Got {data[0]!r} instead.",
)
elif isinstance(data, coo_matrix):
data_format = "sparse_arff"
else:
raise ValueError(
"When giving a list or a numpy.ndarray, "
"they should contain a list/ numpy.ndarray "
"for dense data or a dictionary for sparse "
f"data. Got {data[0]!r} instead.",
)
arff_object = {
"relation": name,
"description": description,
"attributes": attributes_,
"data": data,
}
# serializes the ARFF dataset object and returns a string
arff_dataset = arff.dumps(arff_object)
try:
# check if ARFF is valid
decoder = arff.ArffDecoder()
return_type = arff.COO if data_format == "sparse_arff" else arff.DENSE
decoder.decode(arff_dataset, encode_nominal=True, return_type=return_type)
except arff.ArffException as e:
raise ValueError(
"The arguments you have provided do not construct a valid ARFF file"
) from e
return OpenMLDataset(
name=name,
description=description,
data_format=data_format,
creator=creator,
contributor=contributor,
collection_date=collection_date,
language=language,
licence=licence,
default_target_attribute=default_target_attribute,
row_id_attribute=row_id_attribute,
ignore_attribute=ignore_attribute,
citation=citation,
version_label=version_label,
original_data_url=original_data_url,
paper_url=paper_url,
update_comment=update_comment,
dataset=arff_dataset,
)
def status_update(data_id: int, status: Literal["active", "deactivated"]) -> None:
"""
Updates the status of a dataset to either 'active' or 'deactivated'.
Please see the OpenML API documentation for a description of the status
and all legal status transitions:
https://docs.openml.org/concepts/data/#dataset-status
Parameters
----------
data_id : int
The data id of the dataset
status : str,
'active' or 'deactivated'
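Examples
--------
Illustrative usage (the id is a placeholder; changing a dataset's status
requires authentication and the appropriate rights on the server):
>>> openml.datasets.status_update(data_id=128, status="deactivated")  # doctest: +SKIP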
"""
legal_status = {"active", "deactivated"}
if status not in legal_status:
raise ValueError(f"Illegal status value. Legal values: {legal_status}")
data: openml._api_calls.DATA_TYPE = {"data_id": data_id, "status": status}
result_xml = openml._api_calls._perform_api_call("data/status/update", "post", data=data)
result = xmltodict.parse(result_xml)
server_data_id = result["oml:data_status_update"]["oml:id"]
server_status = result["oml:data_status_update"]["oml:status"]
if status != server_status or int(data_id) != int(server_data_id):
# This should never happen
raise ValueError("Data id/status does not collide")
def edit_dataset(
data_id: int,
description: str | None = None,
creator: str | None = None,
contributor: str | None = None,
collection_date: str | None = None,
language: str | None = None,
default_target_attribute: str | None = None,
ignore_attribute: str | list[str] | None = None,
citation: str | None = None,
row_id_attribute: str | None = None,
original_data_url: str | None = None,
paper_url: str | None = None,
) -> int:
"""Edits an OpenMLDataset.
In addition to providing the dataset id of the dataset to edit (through data_id),
you must specify a value for at least one of the optional function arguments,
i.e. one value for a field to edit.
This function allows editing of both non-critical and critical fields.
Critical fields are default_target_attribute, ignore_attribute, row_id_attribute.
- Editing non-critical data fields is allowed for all authenticated users.
- Editing critical fields is allowed only for the owner, provided there are no tasks
associated with this dataset.
If the dataset has tasks or the user is not the owner, the only way
to edit critical fields is to use fork_dataset followed by edit_dataset.
Parameters
----------
data_id : int
ID of the dataset.
description : str
Description of the dataset.
creator : str
The person who created the dataset.
contributor : str
People who contributed to the current version of the dataset.
collection_date : str
The date the data was originally collected, given by the uploader.
language : str
Language in which the data is represented.
Starts with 1 upper case letter, rest lower case, e.g. 'English'.
default_target_attribute : str
The default target attribute, if it exists.
Can have multiple values, comma separated.
ignore_attribute : str | list
Attributes that should be excluded in modelling,
such as identifiers and indexes.
citation : str
Reference(s) that should be cited when building on this data.
row_id_attribute : str, optional
The attribute that represents the row-id column, if present in the
dataset. If ``data`` is a dataframe and ``row_id_attribute`` is not
specified, the index of the dataframe will be used as the
``row_id_attribute``. If the name of the index is ``None``, it will
be discarded.
.. versionadded:: 0.8
Inference of ``row_id_attribute`` from a dataframe.
original_data_url : str, optional
For derived data, the url to the original dataset.
paper_url : str, optional
Link to a paper describing the dataset.
Returns
-------
Dataset id
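Examples
--------
Illustrative usage (the id is a placeholder; editing requires authentication,
and critical fields can only be edited by the owner):
>>> openml.datasets.edit_dataset(
...     data_id=128,
...     description="An updated description.",
... )  # doctest: +SKIP
128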
"""
if not isinstance(data_id, int):
raise TypeError(f"`data_id` must be of type `int`, not {type(data_id)}.")
# compose data edit parameters as xml
form_data = {"data_id": data_id} # type: openml._api_calls.DATA_TYPE
xml = OrderedDict() # type: 'OrderedDict[str, OrderedDict]'
xml["oml:data_edit_parameters"] = OrderedDict()
xml["oml:data_edit_parameters"]["@xmlns:oml"] = "http://openml.org/openml"
xml["oml:data_edit_parameters"]["oml:description"] = description
xml["oml:data_edit_parameters"]["oml:creator"] = creator
xml["oml:data_edit_parameters"]["oml:contributor"] = contributor
xml["oml:data_edit_parameters"]["oml:collection_date"] = collection_date
xml["oml:data_edit_parameters"]["oml:language"] = language
xml["oml:data_edit_parameters"]["oml:default_target_attribute"] = default_target_attribute
xml["oml:data_edit_parameters"]["oml:row_id_attribute"] = row_id_attribute
xml["oml:data_edit_parameters"]["oml:ignore_attribute"] = ignore_attribute
xml["oml:data_edit_parameters"]["oml:citation"] = citation
xml["oml:data_edit_parameters"]["oml:original_data_url"] = original_data_url
xml["oml:data_edit_parameters"]["oml:paper_url"] = paper_url
# delete None inputs
for k in list(xml["oml:data_edit_parameters"]):
if not xml["oml:data_edit_parameters"][k]:
del xml["oml:data_edit_parameters"][k]
file_elements = {
"edit_parameters": ("description.xml", xmltodict.unparse(xml)),
} # type: openml._api_calls.FILE_ELEMENTS_TYPE
result_xml = openml._api_calls._perform_api_call(
"data/edit",
"post",
data=form_data,
file_elements=file_elements,
)
result = xmltodict.parse(result_xml)
data_id = result["oml:data_edit"]["oml:id"]
return int(data_id)
def fork_dataset(data_id: int) -> int:
"""
Creates a new dataset version, with the authenticated user as the new owner.
The forked dataset can have distinct dataset meta-data,
but the actual data itself is shared with the original version.
This API is intended for use when a user is unable to edit the critical fields of a dataset
through the edit_dataset API.
(Critical fields are default_target_attribute, ignore_attribute, row_id_attribute.)
Specifically, this happens when the user is:
1. Not the owner of the dataset.
2. The owner of the dataset, but the dataset has tasks associated with it.
In these two cases the only way to edit critical fields is:
1. STEP 1: Fork the dataset using fork_dataset API
2. STEP 2: Call edit_dataset API on the forked version.
Parameters
----------
data_id : int
id of the dataset to be forked
Returns
-------
Dataset id of the forked dataset
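Examples
--------
Illustrative usage (the dataset id and the attribute name are placeholders;
forking and editing require authentication):
>>> new_id = openml.datasets.fork_dataset(128)  # doctest: +SKIP
>>> openml.datasets.edit_dataset(new_id, default_target_attribute="class")  # doctest: +SKIP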
"""
if not isinstance(data_id, int):
raise TypeError(f"`data_id` must be of type `int`, not {type(data_id)}.")
# compose data fork parameters
form_data = {"data_id": data_id} # type: openml._api_calls.DATA_TYPE
result_xml = openml._api_calls._perform_api_call("data/fork", "post", data=form_data)
result = xmltodict.parse(result_xml)
data_id = result["oml:data_fork"]["oml:id"]
return int(data_id)
def data_feature_add_ontology(data_id: int, index: int, ontology: str) -> bool:
"""
An ontology describes the concepts that are represented by a feature. An
ontology is defined by a URL where the information is provided. Adds
an ontology (URL) to a given dataset feature (defined by a dataset id
and index). The dataset has to exist on OpenML and needs to have been
processed by the evaluation engine.
Parameters
----------
data_id : int
id of the dataset to which the feature belongs
index : int
index of the feature in dataset (0-based)
ontology : str
URL to ontology (max. 256 characters)
Returns
-------
True or throws an OpenML server exception
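Examples
--------
Illustrative usage (the dataset id and ontology URL are placeholders; the call
requires authentication and a dataset processed by the evaluation engine):
>>> from openml.datasets.functions import data_feature_add_ontology
>>> data_feature_add_ontology(
...     data_id=128, index=0, ontology="https://example.org/ontology/length"
... )  # doctest: +SKIP
True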
"""
upload_data: dict[str, int | str] = {"data_id": data_id, "index": index, "ontology": ontology}
openml._api_calls._perform_api_call("data/feature/ontology/add", "post", data=upload_data)
# an error will be thrown in case the request was unsuccessful
return True
def data_feature_remove_ontology(data_id: int, index: int, ontology: str) -> bool:
"""
Removes an existing ontology (URL) from a given dataset feature (defined