Adding exception handling to the CLI and documenting the compare switch
John Hawkins committed May 24, 2023
1 parent 2e8e907 commit 2b3ab81
Showing 4 changed files with 42 additions and 12 deletions.
16 changes: 16 additions & 0 deletions docs/source/usage.rst
@@ -230,7 +230,23 @@ You can then list the results just for that specific dataset:
Compare Results
^^^^^^^^^^^^^^^^^^^^^

You can compare results across datasets using a simple CLI option.
The syntax requires that you provide a comma-separated list of datasets, as well as the metric you want to
use for the comparison. If you want to compare multiple metrics you will need to create multiple tables.

For example:

.. code-block:: bash

   >projit compare dataset1,dataset2,dataset3 RMSE

This will produce a table where each row corresponds to a specific experiment, and each column corresponds
to one of the three specified datasets. Each cell in the table contains the RMSE of the experiment
on that dataset.
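
As a purely illustrative sketch, assuming two hypothetical experiments named exp_baseline and exp_tuned
(the exact formatting depends on the CLI's table printer), the resulting table is structured like this:

.. code-block:: text

   experiment      dataset1    dataset2    dataset3
   exp_baseline    0.52        0.61        0.55
   exp_tuned       0.43        0.49        0.47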



2 changes: 1 addition & 1 deletion projit/__init__.py
@@ -1,4 +1,4 @@
__version__ = "0.1.10"
__version__ = "0.1.11"

from .utils import locate_projit_config
from .projit import projit_load
20 changes: 17 additions & 3 deletions projit/cli.py
@@ -1,5 +1,6 @@
import argparse
import pandas as pd
import numpy as np
import sys
import os

@@ -81,18 +82,24 @@ def task_compare(project, datasets, metric, markdown):
for the specified metric to compile the comparison dataset to display.
"""
title = "Compare Results"
warning = ""
results = None
for dataset in datasets:
rez = project.get_results(dataset)
if metric not in rez.columns:
rez[metric] = np.nan
warning += f"Metric '{metric}' not present for dataset '{dataset}'\n"
rez = rez.loc[:,['experiment',metric]]
rez.columns = ['experiment', dataset]
if results is None:
results = rez
else:
results = pd.merge(result,rez,on="experiment")
results = pd.merge(results,rez,on="experiment")

if len(warning) > 0:
print("*** WARNINGS ***")
print(warning)

if markdown:
print_results_markdown(title, results)
else:
@@ -323,9 +330,16 @@ def print_usage(prog):
print("")



##########################################################################################
def main():
try:
cli_main()
except Exception as e:
print("*** Projit CLI Error ***")
print(e)

##########################################################################################
def cli_main():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--version', help='Print Version', action='store_true')
parser.add_argument('-m', '--markdown', help='Use markdown for output', action='store_true')
@@ -394,7 +408,7 @@ def main():
if args.cmd == 'list':
task_list(args.subcmd, project, args.dataset, args.markdown)

if args.cmd == 'list':
if args.cmd == 'compare':
datasets = args.datasets.split(",")
task_compare(project, datasets, args.metric, args.markdown)

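To make the merge logic in task_compare concrete, here is a minimal standalone sketch of the same
pandas pattern, using made-up result frames in place of project.get_results(dataset):

import numpy as np
import pandas as pd

# Hypothetical per-dataset result frames; dataset2 is missing the metric.
frames = {
    "dataset1": pd.DataFrame({"experiment": ["exp1", "exp2"], "RMSE": [0.42, 0.37]}),
    "dataset2": pd.DataFrame({"experiment": ["exp1", "exp2"]}),
}

metric = "RMSE"
results = None
for dataset, rez in frames.items():
    if metric not in rez.columns:
        rez[metric] = np.nan          # pad missing metrics so the merge still works
    rez = rez.loc[:, ["experiment", metric]]
    rez.columns = ["experiment", dataset]
    results = rez if results is None else pd.merge(results, rez, on="experiment")

print(results)                        # one row per experiment, one column per dataset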
16 changes: 8 additions & 8 deletions projit/projit.py
@@ -169,17 +169,17 @@ def end_experiment(self, name, id, hyperparams={}):
"""

if not self.experiment_exists(name):
raise Exception(f"ERROR: Cannot end experiment: '{name}' -- Experiment not registered")
raise Exception(f"Projit Experiment Exception: Cannot end experiment: '{name}' -- Experiment not registered")

if name in self.executions:
exper_execs = self.executions[name]
else:
raise Exception(f"ERROR: Cannot end experiment: '{name}' -- Executions not started")
raise Exception(f"Projit Experiment Exception: Cannot end experiment: '{name}' -- Executions not started")

if id in exper_execs:
payload = exper_execs[id]
else:
raise Exception(f"ERROR: Cannot end experiment: '{name}' -- Executions not started")
raise Exception(f"Projit Experiment Exception: Cannot end experiment: '{name}' -- Executions not started")

payload['end'] = str(datetime.now())
payload['hyperparams'] = hyperparams
@@ -367,7 +367,7 @@ def add_hyperparam(self, name, value):
self.hyperparams[name] = value
self.save()
else:
raise Exception("ERROR: No experiment called: '%s' -- Register your experiment first." % name)
raise Exception("Projit Experiment Exception: No experiment called: '%s' -- Register your experiment first." % name)

def add_result(self, experiment, metric, value, dataset=None):
"""
@@ -432,7 +432,7 @@ def get_results(self, dataset=None):
if dataset in self.dataresults:
myresults = self.dataresults[dataset]
else:
raise Exception("ERROR: No results for dataset: %s " % dataset)
raise Exception("Projit Dataset Exception: No results for dataset: %s " % dataset)
for exp in self.experiments:
key = exp[0]
if key in myresults:
@@ -461,19 +461,19 @@ def get_dataset(self, name):
if name in self.datasets:
return self.datasets[name]
else:
raise Exception("ERROR: Named dataset '%s' not available:" % name)
raise Exception("Projit Dataset Exception: Named dataset '%s' not available. Register your dataset" % name)

def get_param(self, name):
if name in self.params:
return self.params[name]
else:
raise Exception("ERROR: Named parameter '%s' is not available:" % name)
raise Exception("Projit Parameter Exception: Named parameter '%s' is not available:" % name)

def get_hyperparam(self, name):
if name in self.hyperparams:
return self.hyperparams[name]
else:
raise Exception("ERROR: Hyper parameters for experiemnt '%s' are not available:" % name)
raise Exception("Projit Parameter Exception: Hyper parameters for experiemnt '%s' are not available:" % name)

def get_path_to_dataset(self, name):
ds = self.get_dataset(name)
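Taken together, the two halves of this commit cooperate: projit.py raises exceptions with
"Projit ... Exception" messages, and the new main() wrapper in cli.py catches them and prints the
message instead of a traceback. A minimal sketch of the pattern (the failing call is hypothetical):

def cli_main():
    # Stand-in for any failing CLI path, e.g. project.get_dataset("missing")
    raise Exception("Projit Dataset Exception: Named dataset 'missing' not available. Register your dataset")

def main():
    try:
        cli_main()
    except Exception as e:
        print("*** Projit CLI Error ***")
        print(e)

main()   # prints the error banner and message instead of a stack trace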
