25 | 25 | BASE_DIR = os.path.abspath(os.path.join( os.path.dirname( __file__ ), '..' )) |
26 | 26 | sys.path.append(BASE_DIR) |
27 | 27 | from scripts.utils.mics import import_func, weights_init, zip_res |
28 | | -from scripts.utils.optim import WarmupCosLR |
29 | 28 | from scripts.network.loss_func import evaluate_leaderboard |
30 | 29 | from scripts.utils.av2_eval import write_output_file |
31 | 30 | from scripts.network.models.basic import cal_pose0to1 |
@@ -69,12 +68,6 @@ def __init__(self, cfg, eval=False): |
69 | 68 | self.av2_mode = None |
70 | 69 | if cfg.pretrained_weights is not None: |
71 | 70 | self.model.load_from_checkpoint(cfg.pretrained_weights) |
72 | | - |
73 | | - self.turn_lr_scheduler = False |
74 | | - if 'lr_scheduler' in cfg: |
75 | | - self.turn_lr_scheduler = cfg.lr_scheduler |
76 | | - self.min_lr = cfg.min_lr |
77 | | - self.warmup_epochs = max(1, int(self.epochs / 10)) |
78 | 71 |
|
79 | 72 | if 'dataset_path' in cfg: |
80 | 73 | self.dataset_path = cfg.dataset_path |
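
For reference, a minimal sketch (assuming `cfg` behaves like an OmegaConf-style config, as the deleted `'lr_scheduler' in cfg` check suggests) of how the removed scheduler settings could be restored in `__init__` if the warmup-cosine schedule is ever brought back. The attribute and key names mirror the deleted lines and are not part of the code after this change:

    # Sketch only -- mirrors the deleted __init__ lines; not part of this commit.
    self.turn_lr_scheduler = False
    if 'lr_scheduler' in cfg:                    # optional key, older configs may omit it
        self.turn_lr_scheduler = cfg.lr_scheduler
        self.min_lr = cfg.min_lr                 # floor of the cosine decay
        self.warmup_epochs = max(1, int(self.epochs / 10))  # ~10% of epochs as linear warmup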
@@ -228,28 +221,11 @@ def validation_step(self, batch, batch_idx): |
228 | 221 | self.train_validation_step_(batch, res_dict) |
229 | 222 |
|
230 | 223 | def configure_optimizers(self): |
231 | | - # optimizer = optim.Adam(self.model.parameters(), lr=self.lr) |
232 | | - |
233 | | - # if self.turn_lr_scheduler: |
234 | | - # scheduler = WarmupCosLR(optimizer = optimizer, |
235 | | - # min_lr = self.min_lr, |
236 | | - # lr = self.lr, |
237 | | - # warmup_epochs = self.warmup_epochs, |
238 | | - # epochs = self.epochs) |
239 | | - # return [optimizer], [scheduler] |
240 | | - |
241 | | - # return optimizer |
242 | 224 | optimizer = optim.Adam(self.model.parameters(), lr=self.lr) |
243 | 225 | return optimizer |
244 | 226 |
|
245 | 227 | def on_train_epoch_start(self): |
246 | 228 | self.time_start_train_epoch = time.time() |
247 | | - # if self.current_epoch < self.warmup_epochs * 2: |
248 | | - # if self.current_epoch == 0: |
249 | | - # self.add_seloss.remove('cluster_flow_loss') |
250 | | - # else: |
251 | | - # if 'cluster_flow_loss' not in self.add_seloss: |
252 | | - # self.add_seloss.append('cluster_flow_loss') |
253 | 229 |
|
254 | 230 | def on_train_epoch_end(self): |
255 | 231 | self.log("pre_epoch_cost (mins)", (time.time()-self.time_start_train_epoch)/60.0, on_step=False, on_epoch=True, sync_dist=True) |
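
After this change `configure_optimizers` returns a bare Adam optimizer. Should the schedule be re-enabled later, a minimal sketch (reusing the `WarmupCosLR` signature from the deleted comment block together with the `turn_lr_scheduler` / `min_lr` / `warmup_epochs` attributes removed from `__init__`) could look like:

    # Hypothetical sketch, not part of this commit: re-attaching the removed
    # warmup-cosine schedule in the Lightning module.
    def configure_optimizers(self):
        optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        if getattr(self, 'turn_lr_scheduler', False):
            scheduler = WarmupCosLR(optimizer=optimizer, min_lr=self.min_lr, lr=self.lr,
                                    warmup_epochs=self.warmup_epochs, epochs=self.epochs)
            return [optimizer], [scheduler]      # Lightning accepts (optimizers, schedulers) lists
        return optimizer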
|