
Commit e125201

formatting
1 parent a3e513e commit e125201

2 files changed: 45 additions & 39 deletions

File tree

scoring/performance_profile.py
scoring/score_submissions.py

scoring/performance_profile.py: 30 additions & 24 deletions
@@ -70,28 +70,35 @@
 
 #MPL params
 mpl.rcParams['figure.figsize'] = (16, 10)  # Width, height in inches
-mpl.rcParams['font.family'] = 'serif'
-mpl.rcParams['font.serif'] = ['Times New Roman'] + mpl.rcParams['font.serif']  # Add Times New Roman as first choice
+mpl.rcParams['font.family'] = 'serif'
+mpl.rcParams['font.serif'] = [
+    'Times New Roman'
+] + mpl.rcParams['font.serif']  # Add Times New Roman as first choice
 mpl.rcParams['font.size'] = 22
 mpl.rcParams['savefig.dpi'] = 300  # Set resolution for saved figures
 
 # Plot Elements
-mpl.rcParams['lines.linewidth'] = 3  # Adjust line thickness if needed
-mpl.rcParams['lines.markersize'] = 6  # Adjust marker size if needed
-mpl.rcParams['axes.prop_cycle'] = mpl.cycler(color=["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd"])  # Example color cycle (consider ColorBrewer or viridis)
-mpl.rcParams['axes.labelsize'] = 22  # Axis label font size
-mpl.rcParams['xtick.labelsize'] = 20  # Tick label font size
+mpl.rcParams['lines.linewidth'] = 3  # Adjust line thickness if needed
+mpl.rcParams['lines.markersize'] = 6  # Adjust marker size if needed
+mpl.rcParams['axes.prop_cycle'] = mpl.cycler(
+    color=["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728",
+           "#9467bd"])  # Example color cycle (consider ColorBrewer or viridis)
+mpl.rcParams['axes.labelsize'] = 22  # Axis label font size
+mpl.rcParams['xtick.labelsize'] = 20  # Tick label font size
 mpl.rcParams['ytick.labelsize'] = 20
 
 # Legends and Gridlines
-mpl.rcParams['legend.fontsize'] = 20  # Legend font size
-mpl.rcParams['legend.loc'] = 'best'  # Let matplotlib decide the best legend location
-mpl.rcParams['axes.grid'] = True  # Enable grid
-mpl.rcParams['grid.alpha'] = 0.4  # Gridline transparency
+mpl.rcParams['legend.fontsize'] = 20  # Legend font size
+mpl.rcParams[
+    'legend.loc'] = 'best'  # Let matplotlib decide the best legend location
+mpl.rcParams['axes.grid'] = True  # Enable grid
+mpl.rcParams['grid.alpha'] = 0.4  # Gridline transparency
+
 
 def print_dataframe(df):
-    tabulated_df = tabulate(df.T, headers='keys', tablefmt='psql')
-    logging.info(tabulated_df)
+  tabulated_df = tabulate(df.T, headers='keys', tablefmt='psql')
+  logging.info(tabulated_df)
+
 
 def generate_eval_cols(metrics):
   splits = ['train', 'validation']
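
Note on the print_dataframe helper touched in this hunk: a minimal, self-contained sketch of what it produces. The DataFrame below is made up for illustration; tabulate renders the transposed frame as a psql-style text grid, which then goes through logging.

import logging

import pandas as pd
from tabulate import tabulate

logging.basicConfig(level=logging.INFO)

# Hypothetical scoring frame; the real one comes from the scoring pipeline.
df = pd.DataFrame({'workload': ['mnist', 'cifar'], 'score': [0.97, 0.89]})
tabulated_df = tabulate(df.T, headers='keys', tablefmt='psql')
logging.info(tabulated_df)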
@@ -206,11 +213,13 @@ def get_workloads_time_to_target(submission,
       num_trials = len(group)
       if num_trials != NUM_TRIALS and not self_tuning_ruleset:
         if strict:
-          raise ValueError(f'In Study {study}: Expecting {NUM_TRIALS} trials for workload '
-                           f'{workload} but found {num_trials} trials.')
+          raise ValueError(
+              f'In Study {study}: Expecting {NUM_TRIALS} trials for workload '
+              f'{workload} but found {num_trials} trials.')
         else:
-          logging.warning(f'In Study {study}: Expecting {NUM_TRIALS} trials for workload '
-                          f'{workload} but found {num_trials} trials.')
+          logging.warning(
+              f'In Study {study}: Expecting {NUM_TRIALS} trials for workload '
+              f'{workload} but found {num_trials} trials.')
 
       # Get trial and time index that reaches target
       trial_idx, time_idx = get_best_trial_index(
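
The rewrapped messages above rely on Python's implicit concatenation of adjacent string literals inside parentheses, so splitting the f-string across lines does not change the logged text. A quick sketch, with stand-in values:

# Adjacent (f-)string literals inside parentheses concatenate at parse time.
study, NUM_TRIALS, workload, num_trials = 1, 5, 'mnist', 3  # stand-in values
msg = (f'In Study {study}: Expecting {NUM_TRIALS} trials for workload '
       f'{workload} but found {num_trials} trials.')
assert msg == 'In Study 1: Expecting 5 trials for workload mnist but found 3 trials.'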
@@ -316,9 +325,8 @@ def compute_performance_profiles(submissions,
       # If variants do not have finite score set base_workload score to inf
       base_workload = get_base_workload_name(workload)
       df[base_workload] = df.apply(
-          variant_criteria_filter(base_workload, workload),
-          axis=1)
-
+          variant_criteria_filter(base_workload, workload), axis=1)
+
     logging.info("HELDOUT_WORKLOAD FILTER")
     print_dataframe(df)
 
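For context on the apply call being rewrapped: variant_criteria_filter is a factory that returns a row-wise function for df.apply(..., axis=1). The sketch below is a hypothetical stand-in consistent with the comment in this hunk (set the base workload's score to inf when the variant's score is not finite); the real implementation lives elsewhere in this repo.

import numpy as np
import pandas as pd

def variant_criteria_filter(base_workload, variant_workload):
  # Hypothetical: keep the base score only if the variant score is finite.
  def filter_fn(row):
    return row[base_workload] if np.isfinite(row[variant_workload]) else np.inf
  return filter_fn

df = pd.DataFrame({'mnist': [10.0, 12.0], 'mnist_variant': [11.0, np.inf]})
df['mnist'] = df.apply(variant_criteria_filter('mnist', 'mnist_variant'), axis=1)
print(df)  # the second row's 'mnist' score becomes inf
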
@@ -415,8 +423,7 @@ def plot_performance_profiles(perf_df,
                               df_col,
                               scale='linear',
                               save_dir=None,
-                              figsize=(30, 10)
-                              ):
+                              figsize=(30, 10)):
   """Plot performance profiles.
 
   Args:
@@ -438,8 +445,7 @@ def plot_performance_profiles(perf_df,
   """
   fig = perf_df.T.plot(figsize=figsize, alpha=0.7)
   df_col_display = f'log10({df_col})' if scale == 'log' else df_col
-  fig.set_xlabel(
-      f'Ratio of `{df_col_display}` to best submission')
+  fig.set_xlabel(f'Ratio of `{df_col_display}` to best submission')
   fig.set_ylabel('Proportion of workloads')
   fig.legend(bbox_to_anchor=(1.0, 1.0))
   plt.tight_layout()
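
A hypothetical call site for the reformatted signature, assuming perf_df has one row per submission and one column per ratio threshold (that shape is an assumption, not taken from this diff):

import pandas as pd

# Stand-in performance-profile frame: rows are submissions, columns are
# ratios of time-to-target relative to the best submission.
perf_df = pd.DataFrame(
    {1.0: [0.5, 0.25], 2.0: [0.75, 0.5], 4.0: [1.0, 1.0]},
    index=['submission_a', 'submission_b'])
plot_performance_profiles(perf_df, df_col='score', scale='linear', save_dir=None)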

scoring/score_submissions.py: 15 additions & 15 deletions
@@ -124,21 +124,21 @@ def main(_):
   results = {}
   os.makedirs(FLAGS.output_dir, exist_ok=True)
 
-  # for team in os.listdir(FLAGS.submission_directory):
-  #   for submission in os.listdir(os.path.join(FLAGS.submission_directory, team)):
-  #     print(submission)
-  #     experiment_path = os.path.join(FLAGS.submission_directory, team, submission)
-  #     df = scoring_utils.get_experiment_df(experiment_path)
-  #     results[submission] = df
-  #     summary_df = get_submission_summary(df)
-  #     with open(os.path.join(FLAGS.output_dir, f'{submission}_summary.csv'),
-  #               'w') as fout:
-  #       summary_df.to_csv(fout)
-
-  # # Save results
-  # with open(os.path.join(FLAGS.output_dir, 'results.pkl'), 'wb') as f:
-  #   pickle.dump(results, f)
-
+  # for team in os.listdir(FLAGS.submission_directory):
+  #   for submission in os.listdir(os.path.join(FLAGS.submission_directory, team)):
+  #     print(submission)
+  #     experiment_path = os.path.join(FLAGS.submission_directory, team, submission)
+  #     df = scoring_utils.get_experiment_df(experiment_path)
+  #     results[submission] = df
+  #     summary_df = get_submission_summary(df)
+  #     with open(os.path.join(FLAGS.output_dir, f'{submission}_summary.csv'),
+  #               'w') as fout:
+  #       summary_df.to_csv(fout)
+
+  # # Save results
+  # with open(os.path.join(FLAGS.output_dir, 'results.pkl'), 'wb') as f:
+  #   pickle.dump(results, f)
+
   # Read results
   with open(os.path.join(FLAGS.output_dir, 'results.pkl'), 'rb') as f:
     results = pickle.load(f)
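
The live read above pairs with the commented-out save: results is a dict of per-submission DataFrames pickled into the output directory. A minimal round-trip sketch, with paths and data as stand-ins for the FLAGS-driven values:

import os
import pickle

import pandas as pd

output_dir = 'scoring_results'  # stand-in for FLAGS.output_dir
os.makedirs(output_dir, exist_ok=True)

results = {'my_submission': pd.DataFrame({'score': [0.9]})}
with open(os.path.join(output_dir, 'results.pkl'), 'wb') as f:
  pickle.dump(results, f)  # save

with open(os.path.join(output_dir, 'results.pkl'), 'rb') as f:
  results = pickle.load(f)  # read back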
