-
Notifications
You must be signed in to change notification settings - Fork 1
/
02_run_benchmark.py
56 lines (45 loc) · 2.02 KB
/
02_run_benchmark.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
"""
Run the actual benchmark.
You can run it multiple times, but it will only execute
the missing configurations. This does not save much time
here but when you have to deal with algorithms that
run for minutes, it is a life saver.
"""
import networkx as nx
from _utils import InstanceDb
from algbench import Benchmark
# Database directory where algbench stores every run's arguments and results;
# re-running the script only executes configurations missing from it.
benchmark = Benchmark("03_benchmark_data")
# Zip archive of graph instances produced by the previous script (01_instances.zip).
instances = InstanceDb("./01_instances.zip")
def load_instance_and_run(instance_name: str, alg_params):
    """Fetch one instance and register a single greedy-coloring run.

    The instance is read from the archive *before* the measured call so that
    deserialization does not pollute the timing data.
    """
    graph = instances[instance_name]

    def eval_greedy_alg(instance_name: str, alg_params, _instance: nx.Graph):
        # Parameters whose names start with `_` are excluded from the database.
        coloring = nx.coloring.greedy_coloring.greedy_color(_instance, **alg_params)
        # Everything in the returned dict is persisted by algbench.
        result = {
            "num_vertices": _instance.number_of_nodes(),
            "num_edges": _instance.number_of_edges(),
            "coloring": coloring,
            "n_colors": max(coloring.values()) + 1,
        }
        return result

    # algbench runs eval_greedy_alg only if this exact argument combination
    # is not already recorded.
    benchmark.add(eval_greedy_alg, instance_name, alg_params, graph)
# Every parameter combination to benchmark. Strategies that support the
# `interchange` option are expanded into both variants; `independent_set`
# and `saturation_largest_first` do not accept it.
alg_params_to_evaluate = []
for _strategy in ("largest_first", "random_sequential", "smallest_last"):
    for _interchange in (True, False):
        alg_params_to_evaluate.append(
            {"strategy": _strategy, "interchange": _interchange}
        )
alg_params_to_evaluate.append({"strategy": "independent_set"})
for _strategy in ("connected_sequential_bfs", "connected_sequential_dfs"):
    for _interchange in (True, False):
        alg_params_to_evaluate.append(
            {"strategy": _strategy, "interchange": _interchange}
        )
alg_params_to_evaluate.append({"strategy": "saturation_largest_first"})
if __name__ == "__main__":
    # Run every configuration on every instance; combinations already in the
    # database are skipped by algbench, so re-running is cheap.
    for name in instances:
        print(name)
        for params in alg_params_to_evaluate:
            load_instance_and_run(name, params)
    # Compact the on-disk database after the run.
    benchmark.compress()