Skip to content

Commit 1e6b065

Browse files
committed
Adds functionality to compute speedups
Computes geometric means across individual workload speedups between two algorithms.
1 parent 9b6c845 commit 1e6b065

1 file changed

Lines changed: 112 additions & 0 deletions

File tree

scoring/compute_speedups.py

Lines changed: 112 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,112 @@
1+
"""File to compute speedups (i.e. geometric means between runtimes)."""
2+
3+
import pickle
4+
5+
from absl import app
6+
from absl import flags
7+
import numpy as np
8+
import pandas as pd
9+
from performance_profile import BASE_WORKLOADS
10+
from performance_profile import get_workloads_time_to_target
11+
from scipy import stats
12+
13+
# Command-line flags controlling which submissions are compared and how.
# `results_txt` and `comparison` are marked required in the __main__ guard.
flags.DEFINE_string('results_txt', None, 'Path to full scoring results file.')
flags.DEFINE_string(
    'base',
    'prize_qualification_baseline',
    'Base submission to compare to. Defaults to the `prize_qualification_baseline`.'
)
flags.DEFINE_string('comparison', None, 'Submission to compute the speedup of.')
# Self-tuning submissions get a 3x runtime budget (see replace_inf).
flags.DEFINE_boolean('self_tuning_ruleset',
                     False,
                     'Whether the self-tuning ruleset is being scored.')
flags.DEFINE_boolean('save_results',
                     False,
                     'Whether to save the results to disk.')
FLAGS = flags.FLAGS
27+
28+
# Per-workload maximum runtime budgets, in seconds, keyed by base workload
# name (matching the row index produced by get_workloads_time_to_target).
# Used by replace_inf to cap infinite runtimes at budget + 1; the
# self-tuning ruleset multiplies these budgets by 3.
MAX_BUDGETS = {
    'criteo1tb': 7703,
    'fastmri': 8859,
    'imagenet_resnet': 63_008,
    'imagenet_vit': 77_520,
    'librispeech_conformer': 61_068,
    'librispeech_deepspeech': 55_506,
    'ogbg': 18_477,
    'wmt': 48_151,
}
38+
39+
40+
def replace_inf(row: pd.Series) -> pd.Series:
  """Replace infs with maximum runtime budget (+1 second).

  An `np.inf` entry presumably means the submission never hit the target
  on that workload (TODO: confirm against performance_profile); capping it
  at budget + 1 keeps the geometric mean finite while still marking the
  result as worse than any in-budget runtime.

  Args:
    row (pd.Series): The original row, named after its workload so the
      budget can be looked up in MAX_BUDGETS.

  Returns:
    pd.Series: The row with infs replaced.
  """
  workload_name = row.name
  # Factor of 3 for self-tuning ruleset
  factor = 3 if FLAGS.self_tuning_ruleset else 1
  max_runtime_workload = factor * MAX_BUDGETS[workload_name]
  # In-place is fine here: apply() uses the returned row either way.
  row.replace(np.inf, max_runtime_workload + 1, inplace=True)
  return row
55+
56+
57+
def compute_speedup():
  """Compute speedup between two algorithms.

  Loads the pickled scoring results from `FLAGS.results_txt`, computes the
  per-workload time-to-target for both the base and the comparison
  submissions, restricts the table to base workloads (no variants), caps
  infinite runtimes at the workload budget + 1, and prints the geometric
  mean of the per-workload runtime ratios. Optionally saves the merged
  per-workload table to a CSV file whose name encodes the speedup.
  """
  # Load results from disk.
  with open(FLAGS.results_txt, 'rb') as f:
    results = pickle.load(f)

  # Compute median over runtimes for both training algorithms.
  base_results = get_workloads_time_to_target(
      results[FLAGS.base],
      FLAGS.base,
      time_col='score',
      self_tuning_ruleset=FLAGS.self_tuning_ruleset,
  )
  comparison_results = get_workloads_time_to_target(
      results[FLAGS.comparison],
      FLAGS.comparison,
      time_col='score',
      self_tuning_ruleset=FLAGS.self_tuning_ruleset,
  )

  # Merge results: one row per workload, one column per submission.
  merged_results = pd.concat([base_results, comparison_results]).transpose()

  # Ignore workload variants (only consider base workloads) for speedup.
  merged_results = merged_results.loc[merged_results.index.isin(BASE_WORKLOADS)]

  # Replace infs with maximum runtime budget (+1 second).
  merged_results = merged_results.apply(replace_inf, axis=1)

  # Compute speedup as comparison / base runtime, so values < 1 mean the
  # comparison submission is faster. (The FLAGS values are already strings;
  # no f-string wrapping needed for column lookup.)
  merged_results['speedup'] = (
      merged_results[FLAGS.comparison] / merged_results[FLAGS.base])
  speedups = merged_results['speedup'].to_numpy()
  mean_speedup = stats.gmean(speedups)  # Geometric mean over workload speedups

  print(merged_results, end='\n\n')
  print(
      f"Average speedup of {FLAGS.comparison} compared to {FLAGS.base}: {mean_speedup} or roughly {(1-mean_speedup):.1%}"
  )

  if FLAGS.save_results:
    # Optionally save results to disk.
    print("Saving results to disk...")
    filename = f'{FLAGS.comparison}_vs_{FLAGS.base}_speedup_{(1-mean_speedup):.1%}.csv'
    merged_results.to_csv(filename)
102+
103+
104+
def main(_):
  """Main function to compute speedup between two algorithms.

  Args:
    _: Unused positional command-line arguments forwarded by `app.run`.
  """
  compute_speedup()
107+
108+
109+
if __name__ == '__main__':
  # These two flags have no usable defaults, so fail fast when missing.
  flags.mark_flag_as_required('results_txt')
  flags.mark_flag_as_required('comparison')
  app.run(main)

0 commit comments

Comments
 (0)