|
47 | 47 | False, |
48 | 48 | 'Whether to score on self-tuning ruleset or externally tuned ruleset') |
49 | 49 | flags.DEFINE_string( |
50 | | - 'save_results_to_filename', |
51 | | - None, |
52 | | - 'Filename to save the processed results that are fed into the performance profile functions' |
53 | | -) |
| 50 | + 'save_results_to_filename', |
| 51 | + None, |
| 52 | + 'Filename to save the processed results that are fed into the performance profile functions.') |
# 'load_results_from_filename' holds a filename (default None), so it must be
# a string flag. DEFINE_boolean would fail at parse time when given a path,
# and the parallel 'save_results_to_filename' flag is already DEFINE_string.
flags.DEFINE_string(
    'load_results_from_filename',
    None,
    'Filename to load processed results from that are fed into performance profile functions')
59 | 57 | FLAGS = flags.FLAGS |
60 | 58 |
|
61 | 59 |
|
@@ -133,24 +131,26 @@ def main(_): |
133 | 131 | results = {} |
134 | 132 | os.makedirs(FLAGS.output_dir, exist_ok=True) |
135 | 133 |
|
136 | | - # for team in os.listdir(FLAGS.submission_directory): |
137 | | - # for submission in os.listdir(os.path.join(FLAGS.submission_directory, team)): |
138 | | - # print(submission) |
139 | | - # experiment_path = os.path.join(FLAGS.submission_directory, team, submission) |
140 | | - # df = scoring_utils.get_experiment_df(experiment_path) |
141 | | - # results[submission] = df |
142 | | - # summary_df = get_submission_summary(df) |
143 | | - # with open(os.path.join(FLAGS.output_dir, f'{submission}_summary.csv'), |
144 | | - # 'w') as fout: |
145 | | - # summary_df.to_csv(fout) |
146 | | - |
147 | | - # # Save results |
148 | | - # with open(os.path.join(FLAGS.output_dir, 'results.pkl'), 'wb') as f: |
149 | | - # pickle.dump(results, f) |
150 | | - |
151 | | - # Read results |
152 | | - with open(os.path.join(FLAGS.output_dir, 'results.pkl'), 'rb') as f: |
153 | | - results = pickle.load(f) |
 | 134 | + # Optionally load results from filename |
| 135 | + if FLAGS.load_results_from_filename: |
| 136 | + with open(os.path.join(FLAGS.output_dir, FLAGS.load_results_from_filename), 'rb') as f: |
| 137 | + results = pickle.load(f) |
| 138 | + else: |
| 139 | + for team in os.listdir(FLAGS.submission_directory): |
| 140 | + for submission in os.listdir(os.path.join(FLAGS.submission_directory, team)): |
| 141 | + print(submission) |
| 142 | + experiment_path = os.path.join(FLAGS.submission_directory, team, submission) |
| 143 | + df = scoring_utils.get_experiment_df(experiment_path) |
| 144 | + results[submission] = df |
| 145 | + summary_df = get_submission_summary(df) |
| 146 | + with open(os.path.join(FLAGS.output_dir, f'{submission}_summary.csv'), |
| 147 | + 'w') as fout: |
| 148 | + summary_df.to_csv(fout) |
| 149 | + |
| 150 | + # Optionally save results to filename |
| 151 | + if FLAGS.save_results_to_filename: |
| 152 | + with open(os.path.join(FLAGS.output_dir, FLAGS.save_results_to_filename), 'wb') as f: |
| 153 | + pickle.dump(results, f) |
154 | 154 |
|
155 | 155 | if not FLAGS.strict: |
156 | 156 | logging.warning( |
|
0 commit comments