Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions openml/_api_calls.py
Original file line number Diff line number Diff line change
Expand Up @@ -139,9 +139,9 @@ def _parse_server_exception(response, url=None):
additional = None
if 'oml:additional_information' in server_exception['oml:error']:
additional = server_exception['oml:error']['oml:additional_information']
if code in [370, 372, 512, 500, 482]:
# 512 for runs, 370 for datasets (should be 372), 500 for flows
# 482 for tasks
if code in [372, 512, 500, 482, 542, 674]: # datasets,
# 512 for runs, 372 for datasets, 500 for flows
# 482 for tasks, 542 for evaluations, 674 for setups
return OpenMLServerNoResult(code, message, additional)
return OpenMLServerException(
code=code,
Expand Down
2 changes: 1 addition & 1 deletion openml/datasets/functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -189,7 +189,7 @@ def _list_datasets(api_call):
try:
xml_string = _perform_api_call(api_call)
except OpenMLServerNoResult:
return []
return dict()
datasets_dict = xmltodict.parse(xml_string, force_list=('oml:dataset',))

# Minimalistic check if the XML is useful
Expand Down
7 changes: 5 additions & 2 deletions openml/evaluations/functions.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import xmltodict

from openml.exceptions import OpenMLServerNoResult
from .._api_calls import _perform_api_call
from ..evaluations import OpenMLEvaluation

Expand Down Expand Up @@ -59,8 +60,10 @@ def list_evaluations(function, offset=None, size=None, id=None, task=None,

def _list_evaluations(api_call):
"""Helper function to parse API calls which are lists of runs"""

xml_string = _perform_api_call(api_call)
try:
xml_string = _perform_api_call(api_call)
except OpenMLServerNoResult:
return dict()

evals_dict = xmltodict.parse(xml_string, force_list=('oml:evaluation',))
# Minimalistic check if the XML is useful
Expand Down
2 changes: 1 addition & 1 deletion openml/flows/functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ def _list_flows(api_call):
try:
xml_string = _perform_api_call(api_call)
except OpenMLServerNoResult:
return []
return dict()
flows_dict = xmltodict.parse(xml_string, force_list=('oml:flow',))

# Minimalistic check if the XML is useful
Expand Down
2 changes: 1 addition & 1 deletion openml/runs/functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -948,7 +948,7 @@ def _list_runs(api_call):
try:
xml_string = _perform_api_call(api_call)
except OpenMLServerNoResult:
return []
return dict()

runs_dict = xmltodict.parse(xml_string, force_list=('oml:run',))
# Minimalistic check if the XML is useful
Expand Down
6 changes: 5 additions & 1 deletion openml/setups/functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
from .. import config
from .setup import OpenMLSetup, OpenMLParameter
from openml.flows import flow_exists
from openml.exceptions import OpenMLServerNoResult


def setup_exists(flow, model=None):
Expand Down Expand Up @@ -145,7 +146,10 @@ def list_setups(flow=None, tag=None, setup=None, offset=None, size=None):
def _list_setups(api_call):
"""Helper function to parse API calls which are lists of setups"""

xml_string = openml._api_calls._perform_api_call(api_call)
try:
xml_string = openml._api_calls._perform_api_call(api_call)
except OpenMLServerNoResult:
return dict()

setups_dict = xmltodict.parse(xml_string, force_list=('oml:setup',))
# Minimalistic check if the XML is useful
Expand Down
1 change: 1 addition & 0 deletions openml/study/functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from openml.study import OpenMLStudy
from .._api_calls import _perform_api_call


def _multitag_to_list(result_dict, tag):
if isinstance(result_dict[tag], list):
return result_dict[tag]
Expand Down
2 changes: 1 addition & 1 deletion openml/tasks/functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,7 @@ def _list_tasks(api_call):
try:
xml_string = _perform_api_call(api_call)
except OpenMLServerNoResult:
return []
return dict()
tasks_dict = xmltodict.parse(xml_string, force_list=('oml:task','oml:input'))
# Minimalistic check if the XML is useful
if 'oml:tasks' not in tasks_dict:
Expand Down
8 changes: 8 additions & 0 deletions tests/test_datasets/test_dataset_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,6 +147,14 @@ def test_list_datasets_paginate(self):
for did in datasets:
self._check_dataset(datasets[did])

def test_list_datasets_empty(self):
    """Listing datasets by an unused tag returns an empty dict.

    Uses a tag assumed to be unused on the server; if the tag is ever
    used, the assertion message tells the maintainer to pick a new one.
    """
    datasets = openml.datasets.list_datasets(tag='NoOneWouldUseThisTagAnyway')
    self.assertIsInstance(datasets, dict)
    # Also assert emptiness through the framework instead of raising a
    # bare ValueError, so a stale tag shows up as a test failure.
    self.assertEqual(len(datasets), 0,
                     'UnitTest Outdated, tag was already used (please remove)')
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You should also check that the dictionary is actually empty.



@unittest.skip('See https://github.com/openml/openml-python/issues/149')
def test_check_datasets_active(self):
active = openml.datasets.check_datasets_active([1, 17])
Expand Down
7 changes: 7 additions & 0 deletions tests/test_evaluations/test_evaluation_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,3 +63,10 @@ def test_evaluation_list_limit(self):

evaluations = openml.evaluations.list_evaluations("predictive_accuracy", size=100, offset=100)
self.assertEquals(len(evaluations), 100)

def test_list_evaluations_empty(self):
    """Listing evaluations for a nonexistent measure returns an empty dict."""
    evaluations = openml.evaluations.list_evaluations('unexisting_measure')
    self.assertIsInstance(evaluations, dict)
    # Assert emptiness through the framework instead of raising a bare
    # ValueError, so unexpected results show up as a test failure.
    self.assertEqual(len(evaluations), 0,
                     'UnitTest Outdated, got somehow results')
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same here.

7 changes: 7 additions & 0 deletions tests/test_flows/test_flow_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,13 @@ def test_list_flows(self):
for fid in flows:
self._check_flow(flows[fid])

def test_list_flows_empty(self):
    """Listing flows by an unused tag returns an empty dict.

    Uses a tag assumed to be unused on the server; if the tag is ever
    used, the assertion message tells the maintainer to pick a new one.
    """
    flows = openml.flows.list_flows(tag='NoOneEverUsesThisTag123')
    self.assertIsInstance(flows, dict)
    # Assert emptiness through the framework instead of raising a bare
    # ValueError, so a stale tag shows up as a test failure.
    self.assertEqual(len(flows), 0,
                     'UnitTest Outdated, got somehow results (please adapt)')

def test_list_flows_by_tag(self):
flows = openml.flows.list_flows(tag='weka')
self.assertGreaterEqual(len(flows), 5)
Expand Down
7 changes: 7 additions & 0 deletions tests/test_runs/test_run_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -830,6 +830,13 @@ def test_get_runs_list(self):
for rid in runs:
self._check_run(runs[rid])

def test_list_runs_empty(self):
    """Listing runs for a nonexistent task id returns an empty dict."""
    runs = openml.runs.list_runs(task=[-1])
    self.assertIsInstance(runs, dict)
    # Assert emptiness through the framework instead of raising a bare
    # ValueError, so unexpected results show up as a test failure.
    self.assertEqual(len(runs), 0,
                     'UnitTest Outdated, got somehow results')
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same here.


def test_get_runs_list_by_task(self):
# TODO: comes from live, no such lists on test
openml.config.server = self.production_server
Expand Down
7 changes: 7 additions & 0 deletions tests/test_setups/test_setup_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,13 @@ def test_setup_list_filter_flow(self):
for setup_id in setups.keys():
self.assertEquals(setups[setup_id].flow_id, flow_id)

def test_list_setups_empty(self):
    """Listing setups for a nonexistent setup id returns an empty dict."""
    setups = openml.setups.list_setups(setup=[-1])
    self.assertIsInstance(setups, dict)
    # Assert emptiness through the framework instead of raising a bare
    # ValueError, so unexpected results show up as a test failure.
    self.assertEqual(len(setups), 0,
                     'UnitTest Outdated, got somehow results')
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same here.


def test_setuplist_offset(self):
# TODO: remove after pull on live for better testing
# openml.config.server = self.production_server
Expand Down
7 changes: 7 additions & 0 deletions tests/test_tasks/test_task_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,13 @@ def test_list_tasks_by_type(self):
self.assertEquals(ttid, tasks[tid]["ttid"])
self._check_task(tasks[tid])

def test_list_tasks_empty(self):
    """Listing tasks by an unused tag returns an empty dict.

    Uses a tag assumed to be unused on the server; if the tag is ever
    used, the assertion message tells the maintainer to pick a new one.
    """
    tasks = openml.tasks.list_tasks(tag='NoOneWillEverUseThisTag')
    self.assertIsInstance(tasks, dict)
    # Assert emptiness through the framework instead of raising a bare
    # ValueError, so a stale tag shows up as a test failure.
    self.assertEqual(len(tasks), 0,
                     'UnitTest Outdated, got somehow results (tag is used, please adapt)')
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same here.


def test_list_tasks_by_tag(self):
num_basic_tasks = 100 # number is flexible, check server if fails
tasks = openml.tasks.list_tasks(tag='study_14')
Expand Down