Skip to content

Commit c465e25

Browse files
Merge pull request #778 from fsschneider/scoring_QoL
Minor fix scoring code & speedup computation
2 parents 38554d1 + 5168eb5 commit c465e25

3 files changed

Lines changed: 121 additions & 1 deletion

File tree

scoring/compute_speedups.py

Lines changed: 112 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,112 @@
1+
"""File to compute speedups (i.e. geometric means between runtimes)."""
2+
3+
import pickle
4+
5+
from absl import app
6+
from absl import flags
7+
import numpy as np
8+
import pandas as pd
9+
from performance_profile import BASE_WORKLOADS
10+
from performance_profile import get_workloads_time_to_target
11+
from scipy import stats
12+
13+
# Command-line flags (absl). `results_txt` and `comparison` are marked as
# required in the __main__ guard at the bottom of this file.
flags.DEFINE_string('results_txt', None, 'Path to full scoring results file.')
flags.DEFINE_string(
    'base',
    'prize_qualification_baseline',
    'Base submission to compare to. Defaults to the `prize_qualification_baseline`.'
)
flags.DEFINE_string('comparison', None, 'Submission to compute the speedup of.')
flags.DEFINE_boolean('self_tuning_ruleset',
                     False,
                     'Whether the self-tuning ruleset is being scored.')
flags.DEFINE_boolean('save_results',
                     False,
                     'Whether to save the results to disk.')
FLAGS = flags.FLAGS

# Maximum runtime budget per base workload, in seconds (external-tuning
# ruleset). `replace_inf` multiplies these by 3 when the self-tuning
# ruleset is scored, and uses budget + 1 to cap infinite runtimes.
MAX_BUDGETS = {
    'criteo1tb': 7703,
    'fastmri': 8859,
    'imagenet_resnet': 63_008,
    'imagenet_vit': 77_520,
    'librispeech_conformer': 61_068,
    'librispeech_deepspeech': 55_506,
    'ogbg': 18_477,
    'wmt': 48_151,
}
38+
39+
40+
def replace_inf(row):
  """Replace infs with the workload's maximum runtime budget (+1 second).

  An inf runtime means the submission never hit the target on that workload;
  capping it at just above the runtime budget keeps the geometric mean of
  speedups finite while still penalizing the miss.

  Args:
    row (pd.Series): The original row; `row.name` is the workload name.

  Returns:
    pd.Series: The row with infs replaced (also mutated in place).
  """
  workload_name = row.name
  # Factor of 3 for self-tuning ruleset
  factor = 3 if FLAGS.self_tuning_ruleset else 1
  max_runtime_workload = factor * MAX_BUDGETS[workload_name]
  row.replace(np.inf, max_runtime_workload + 1, inplace=True)
  return row
55+
56+
57+
def compute_speedup():
  """Compute speedup between two algorithms.

  Reads the pickled scoring results from `FLAGS.results_txt`, computes the
  per-workload time-to-target for both `FLAGS.base` and `FLAGS.comparison`,
  and prints the geometric mean of the per-workload runtime ratios.
  Optionally writes the merged per-workload table to a CSV file when
  `FLAGS.save_results` is set.
  """
  # Load results from disk. NOTE(review): unpickling is only safe on trusted
  # result files produced by the scoring pipeline itself.
  with open(FLAGS.results_txt, 'rb') as f:
    results = pickle.load(f)

  # Compute median over runtimes for both training algorithms
  base_results = get_workloads_time_to_target(
      results[FLAGS.base],
      FLAGS.base,
      time_col="score",
      self_tuning_ruleset=FLAGS.self_tuning_ruleset,
  )
  comparison_results = get_workloads_time_to_target(
      results[FLAGS.comparison],
      FLAGS.comparison,
      time_col="score",
      self_tuning_ruleset=FLAGS.self_tuning_ruleset,
  )

  # Merge results: after the transpose, rows are workloads and there is one
  # column per submission.
  merged_results = pd.concat([base_results, comparison_results]).transpose()

  # Ignore workload variants (only consider base workloads) for speedup
  merged_results = merged_results.loc[merged_results.index.isin(BASE_WORKLOADS)]

  # Replace infs with maximum runtime budget (+1 second) so the geometric
  # mean below stays finite.
  merged_results = merged_results.apply(replace_inf, axis=1)

  # Compute speedup: per-workload ratio comparison / base (< 1 means the
  # comparison submission is faster).
  merged_results['speedup'] = merged_results[
      f'{FLAGS.comparison}'] / merged_results[f'{FLAGS.base}']
  speedups = merged_results['speedup'].to_numpy()
  mean_speedup = stats.gmean(speedups)  # Geometric mean over workload speedups

  print(merged_results, end='\n\n')
  print(
      f"Average speedup of {FLAGS.comparison} compared to {FLAGS.base}: {mean_speedup} or roughly {(1-mean_speedup):.1%}"
  )

  if FLAGS.save_results:
    # Optionally save results to disk
    print("Saving results to disk...")
    filename = f'{FLAGS.comparison}_vs_{FLAGS.base}_speedup_{(1-mean_speedup):.1%}.csv'
    merged_results.to_csv(filename)
102+
103+
104+
def main(_):
  """Entry point for `app.run`; delegates to `compute_speedup`."""
  # The positional argument is absl's leftover argv, which we do not use.
  compute_speedup()
107+
108+
109+
if __name__ == '__main__':
  # Fail fast with a usage error if the mandatory flags are missing.
  flags.mark_flag_as_required('results_txt')
  flags.mark_flag_as_required('comparison')
  app.run(main)

scoring/performance_profile.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -307,6 +307,14 @@ def compute_performance_profiles(submissions,
307307
strict))
308308
df = pd.concat(dfs)
309309

310+
# For each held-out workload, set its score to inf if the corresponding base workload's score is inf
311+
for workload in df.keys():
312+
if workload not in BASE_WORKLOADS:
313+
# If the base workload does not have a finite score, set the variant score to inf
314+
base_workload = get_base_workload_name(workload)
315+
df[workload] = df.apply(
316+
variant_criteria_filter(workload, base_workload), axis=1)
317+
310318
# Set score to inf if not within 4x of fastest submission
311319
best_scores = df.min(axis=0)
312320
df[df.apply(lambda x: x > 4 * best_scores, axis=1)] = np.inf

scoring/score_submissions.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -198,7 +198,7 @@ def main(_):
198198
results,
199199
time_col='score',
200200
min_tau=1.0,
201-
max_tau=None,
201+
max_tau=4.0,
202202
reference_submission_tag=None,
203203
num_points=100,
204204
scale='linear',

0 commit comments

Comments
 (0)