Skip to content

Commit f1774fd

Browse files
committed
merge
2 parents 5ac8fa6 + 5f733e1 commit f1774fd

3 files changed

Lines changed: 10 additions & 7 deletions

File tree

algoperf/pytorch_utils.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,6 @@ def pytorch_setup() -> Tuple[bool, int, torch.device, int]:
2727
# torch.backends.cudnn.fp32_precision = "ieee"
2828
# torch.backends.cudnn.conv.fp32_precision = "tf32"
2929
# torch.backends.cudnn.rnn.fp32_precision = "tf32"
30-
3130

3231
use_pytorch_ddp = 'LOCAL_RANK' in os.environ
3332
rank = int(os.environ['LOCAL_RANK']) if use_pytorch_ddp else 0

algoperf/workloads/finewebedu_lm/workload.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -85,11 +85,11 @@ def train_stddev(self):
8585

8686
@property
8787
def max_allowed_runtime_sec(self) -> int:
88-
return 31_967 # 8.9 hours
88+
return 31_967 # 8.9 hours
8989

9090
@property
9191
def eval_period_time_sec(self) -> int:
92-
return 2_571 # approximately 25 evals
92+
return 2_571 # approximately 25 evals
9393

9494
@property
9595
def step_hint(self) -> int:

scoring/score_submissions.py

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@
7070
flags.DEFINE_string(
7171
'include_submissions',
7272
'',
73-
'Optional comma seperated list of names of submissions to include from scoring.'
73+
'Optional comma seperated list of names of submissions to include from scoring.',
7474
)
7575
FLAGS = flags.FLAGS
7676

@@ -128,12 +128,14 @@ def get_summary_df(workload, workload_df, include_test_split=False):
128128
# compute the step times
129129
def delta(series):
130130
return series.shift(1, fill_value=0) - series
131-
accumulated_time_intervals = delta(workload_df['accumulated_submission_time']) # exclude first step
132-
step_intervals = delta(workload_df['global_step']) # exclude time up to first step
131+
accumulated_time_intervals = delta(workload_df['accumulated_submission_time'])
132+
step_intervals = delta(workload_df['global_step'])
133133
if len(accumulated_time_intervals) < 2:
134134
print(f"WARNING: The number of evals may be too low to calculate reliable step time for {workload}")
135135

136136
summary_df['step_time (s)'] = np.median((accumulated_time_intervals / step_intervals).iloc[0])
137+
138+
137139
summary_df['step_hint'] = scoring_utils.get_workload_stephint(workload)
138140

139141
# test metrics
@@ -224,7 +226,9 @@ def main(_):
224226

225227
for submission in all_submission_dirs:
226228
print(submission)
227-
if submission not in FLAGS.exclude_submissions.split(',') and (submission in include_submissions):
229+
if submission not in FLAGS.exclude_submissions.split(',') and (
230+
submission in include_submissions
231+
):
228232
experiment_path = os.path.join(FLAGS.submission_directory, submission)
229233
df = scoring_utils.get_experiment_df(experiment_path)
230234
results[submission] = df

0 commit comments

Comments (0)