Commit ae90c5b4 authored by PLian

Set the default score to 'Accuracy'

parent 2849cbc7
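The core change: ParamFile now falls back to 'Accuracy' as the score name when the parameter file defines no score summary, instead of only logging a warning. Below is a minimal standalone sketch of that fallback; the function name and arguments are illustrative only (param_runner keeps this logic inside ParamFile, as shown in the last file of this diff).

import logging


def apply_default_score(vals, summary_ids, default='Accuracy'):
    """Illustrative sketch of the new fallback; not the project's API."""
    if 'score' in vals:
        # A score was requested: it must name one of the defined summaries.
        if vals['score'] not in summary_ids:
            raise ValueError("Specified score '%s' is not a defined summary" %
                             vals['score'])
    else:
        # No score given: warn and fall back to the default, as this commit does.
        logging.warning(" - No score summary was provided. Using '%s' as default.",
                        default)
        vals['score'] = default
    return vals

With the default always set, downstream code such as the executor's summary handling further down in this diff can rely on vals['score'] being present.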
@@ -10,7 +10,9 @@ before_script:

 stages:
   - deps
-  - lint
+  - lint_1
+  - lint_2
+  - lint_3
   - build
   - test
@@ -23,17 +25,17 @@ setup-venv:
     - .venv/

 flake8:
-  stage: lint
+  stage: lint_1
   script:
     - flake8 param_runner

 pydocstyle:
-  stage: lint
+  stage: lint_2
   script:
     - pydocstyle param_runner

 bandit:
-  stage: lint
+  stage: lint_3
   allow_failure: true
   script:
     - bandit -x test -r param_runner
......
"""param_runner explores parameter spaces."""
__version__ = '2.0.0a1'
__version__ = '2.0.1a1'
@@ -9,6 +9,7 @@ from abc import ABCMeta, abstractmethod
 from pathos.multiprocessing import Pool

 logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)


 class BaseExecutor(object):
@@ -92,6 +93,9 @@ class BaseExecutor(object):
     def write_trace(self, summary_dict):
         """Write a single line to the trace file, summarizing a task."""
+        print('Starting to write trace ...')
+        print(summary_dict)
+
         try:
             self._write_trace(summary_dict)
         except Exception as e:
@@ -183,6 +187,17 @@ class BaseExecutor(object):
                 else:
                     summary_dict[summary_val['id']] = val

+        logger.debug('The summary_dict is:')
+        logger.debug(summary_dict)
+        logger.debug('')
+        logger.debug('The self.params.vals are:')
+        logger.debug(self.params.vals)
+        logger.debug('')
+
+        #
+        # Since the default score is now set to 'Accuracy' in param.py, this check
+        # is no longer necessary; keep it until a better way to handle the score is found.
+        #
         if "score" in self.params.vals:
             if self.params.vals["score"] in summary_dict:
                 score_name = self.params.vals["score"]
......
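The comment added above notes that, once param.py guarantees a default score, the 'if "score" in self.params.vals' guard in BaseExecutor becomes redundant. A hypothetical helper (not code from this commit) showing the plain lookup that then suffices:

def resolve_score(params_vals, summary_dict, default='Accuracy'):
    """Return the score a task reported, assuming vals['score'] is always set.

    Hypothetical sketch; the executor keeps this logic inline.
    """
    score_name = params_vals.get('score', default)  # default mirrors param.py
    return summary_dict.get(score_name)


# Example: resolve_score({'score': 'Accuracy'}, {'Accuracy': 0.93}) returns 0.93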
@@ -18,6 +18,7 @@ class LocalExecutor(BaseExecutor):

     def __init__(self, cwd, params):
         """Initialize local execution, computing concurrent tasks to run."""
+        logger.setLevel(logging.DEBUG)
         logger.info(" - Initializing pool for local tasks")
         cpus = multiprocessing.cpu_count()
         logger.info(" - %d CPUs available on this machine" % cpus)
......
@@ -5,6 +5,7 @@ import logging
 from base import BaseOptimizer

 logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)


 class GridSearchOptimizer(BaseOptimizer):
@@ -12,7 +13,7 @@ class GridSearchOptimizer(BaseOptimizer):
     task_index = 0

     best_task = {
-        'task_index': None,
+        'task_index': -1,
         'score': 1e99
     }
@@ -31,6 +32,7 @@ class GridSearchOptimizer(BaseOptimizer):
         pool.join()

         logger.info(" - Optimization completed after %d tasks run" % tasks_total)
         logger.info(" - Best score: %f\ttask %d", self.best_task['score'],
                     self.best_task['task_index'])
......
@@ -13,6 +13,7 @@ from yamllint.cli import Format
 from yamllint.config import YamlLintConfig

 logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)


 # PATH TO MODULECMD
@@ -196,7 +197,15 @@ class ParamFile(object):
                 raise ValueError("Specified score '%s' is not a defined summary" %
                                  self.vals['score'])
-        logging.warning(" - No score summary was provided")
+        #
+        # Set the default score to 'Accuracy'.
+        # TODO: find a better way to obtain the input score.
+        #
+        else:
+            logging.warning(" - No score summary was provided. Using 'Accuracy' as default.")
+            self.vals['score'] = 'Accuracy'
+            # logging.warning(" - No score summary was provided")

     def __compute_param_ranges(self):
         """Compute the parameter ranges as lists, from their definitions."""
......