Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
53 changes: 37 additions & 16 deletions autosklearn/evaluation/__init__.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,14 @@
# -*- encoding: utf-8 -*-
import functools
import json
import math
import multiprocessing
from queue import Empty
import time
import traceback
from typing import Tuple
from typing import Dict, List, Optional, Tuple, Union

from ConfigSpace import Configuration
import numpy as np
import pynisher
from smac.runhistory.runhistory import RunInfo, RunValue
Expand Down Expand Up @@ -81,6 +83,14 @@ def get_cost_of_crash(metric):
return worst_possible_result


def _encode_exit_status(exit_status):
try:
json.dumps(exit_status)
return exit_status
except (TypeError, OverflowError):
return str(exit_status)


# TODO potentially log all inputs to this class to pickle them in order to do
# easier debugging of potential crashes
class ExecuteTaFuncWithQueue(AbstractTAFunc):
Expand Down Expand Up @@ -224,11 +234,15 @@ def run_wrapper(

return super().run_wrapper(run_info=run_info)

def run(self, config, instance=None,
cutoff=None,
seed=12345,
budget=0.0,
instance_specific=None):
def run(
self,
config: Configuration,
instance: Optional[str] = None,
cutoff: Optional[float] = None,
seed: int = 12345,
budget: float = 0.0,
instance_specific: Optional[str] = None,
) -> Tuple[StatusType, float, float, Dict[str, Union[int, float, str, Dict, List, Tuple]]]:

queue = multiprocessing.Queue()

Expand Down Expand Up @@ -272,11 +286,19 @@ def run(self, config, instance=None,
obj_kwargs['resampling_strategy'] = self.resampling_strategy
obj_kwargs['resampling_strategy_args'] = self.resampling_strategy_args

obj = pynisher.enforce_limits(**arguments)(self.ta)
obj(**obj_kwargs)

if obj.exit_status in (pynisher.TimeoutException,
pynisher.MemorylimitException):
try:
obj = pynisher.enforce_limits(**arguments)(self.ta)
obj(**obj_kwargs)
except Exception as e:
exception_traceback = traceback.format_exc()
error_message = repr(e)
additional_info = {
'traceback': exception_traceback,
'error': error_message
}
return StatusType.CRASHED, self.cost_for_crash, 0.0, additional_info

if obj.exit_status in (pynisher.TimeoutException, pynisher.MemorylimitException):
# Even if the pynisher thinks that a timeout or memout occurred,
# it can be that the target algorithm wrote something into the queue
# - then we treat it as a successful run
Expand Down Expand Up @@ -309,8 +331,7 @@ def run(self, config, instance=None,
elif obj.exit_status is pynisher.MemorylimitException:
status = StatusType.MEMOUT
additional_run_info = {
'error': 'Memout (used more than %d MB).' %
self.memory_limit
'error': 'Memout (used more than %d MB).' % self.memory_limit
}
else:
raise ValueError(obj.exit_status)
Expand All @@ -322,7 +343,7 @@ def run(self, config, instance=None,
cost = self.worst_possible_result
additional_run_info = {'error': 'Your configuration of '
'auto-sklearn does not work!',
'exit_status': obj.exit_status,
'exit_status': _encode_exit_status(obj.exit_status),
'subprocess_stdout': obj.stdout,
'subprocess_stderr': obj.stderr,
}
Expand All @@ -343,14 +364,14 @@ def run(self, config, instance=None,
'because the pynisher exit ' \
'status %s is unknown.' % \
str(obj.exit_status)
additional_run_info['exit_status'] = obj.exit_status
additional_run_info['exit_status'] = _encode_exit_status(obj.exit_status)
additional_run_info['subprocess_stdout'] = obj.stdout
additional_run_info['subprocess_stderr'] = obj.stderr
except Empty:
info = None
additional_run_info = {
'error': 'Result queue is empty',
'exit_status': obj.exit_status,
'exit_status': _encode_exit_status(obj.exit_status),
'subprocess_stdout': obj.stdout,
'subprocess_stderr': obj.stderr,
'exitcode': obj.exitcode
Expand Down