-
Notifications
You must be signed in to change notification settings - Fork 2
/
scheduler.py
56 lines (37 loc) · 2.06 KB
/
scheduler.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
#!/usr/bin/env python3.7
from typing import Any, Callable, List, Tuple
from operator import add
from functools import partial
from utils import map_, uc_
class DummyScheduler(object):
    """No-op scheduler: returns optimizer, loss functions and weights unchanged.

    Acts as the identity element for the scheduler interface shared by the
    other schedulers in this module.
    """
    def __call__(self, epoch: int, optimizer: Any, loss_fns: List[List[Callable]], loss_weights: List[List[float]]) \
            -> Tuple[Any, List[List[Callable]], List[List[float]]]:
        # Fixed annotation: the first returned element is the optimizer (Any),
        # not a float as the original signature claimed.
        return optimizer, loss_fns, loss_weights
class AddWeightLoss():
    """Scheduler that shifts every loss weight by a fixed per-loss offset on each call."""

    def __init__(self, to_add: List[float]):
        # Additive offsets, zipped against each inner weight list.
        self.to_add: List[float] = to_add

    def __call__(self, epoch: int, optimizer: Any, loss_fns: List[List[Callable]], loss_weights: List[List[float]]) \
            -> Tuple[Any, List[List[Callable]], List[List[float]]]:
        """Return ``(optimizer, loss_fns, new_weights)``; only the weights change.

        Fixed annotation: the first element is the optimizer (Any), not a float.
        """
        # NOTE(review): this compares to_add against the OUTER list length, while
        # zip() below pairs it with each INNER list -- works only when both lengths
        # coincide; confirm against the callers' weight layout.
        assert len(self.to_add) == len(loss_weights)

        # Equivalent to the former map_(uc_(add), zip(...)) helpers, written as a
        # plain comprehension so the arithmetic is explicit.
        new_weights: List[List[float]] = [[w + a for w, a in zip(ws, self.to_add)]
                                          for ws in loss_weights]
        print(f"Loss weights went from {loss_weights} to {new_weights}")

        return optimizer, loss_fns, new_weights
class StealWeight():
    """Scheduler that moves weight from the first loss to the second on each call.

    Expects every inner weight list to hold exactly two entries ``[a, b]``;
    each call yields ``[max(0.1, a - to_steal), b + to_steal]``.
    """

    def __init__(self, to_steal: float):
        # Amount transferred from the first weight to the second per call.
        self.to_steal: float = to_steal

    def __call__(self, epoch: int, optimizer: Any, loss_fns: List[List[Callable]], loss_weights: List[List[float]]) \
            -> Tuple[Any, List[List[Callable]], List[List[float]]]:
        """Return ``(optimizer, loss_fns, new_weights)``; only the weights change.

        Fixed annotation: the first element is the optimizer (Any), not a float.
        """
        # NOTE(review): when the 0.1 floor clamps `a`, `b` still gains the full
        # to_steal, so the total weight is not conserved -- confirm intended.
        new_weights: List[List[float]] = [[max(0.1, a - self.to_steal), b + self.to_steal] for a, b in loss_weights]
        print(f"Loss weights went from {loss_weights} to {new_weights}")

        return optimizer, loss_fns, new_weights
class MultiplyT():
    """Scheduler that scales the ``t`` attribute of a targeted loss by ``mu``.

    Losses are matched by class name, so every instance of the target loss in
    ``loss_fns`` is mutated in place on each call.
    """

    def __init__(self, target_loss: str, mu: float):
        # Class name of the loss whose `t` attribute is scheduled.
        self.target_loss: str = target_loss
        # Multiplicative factor applied to `t` per call.
        self.mu: float = mu

    def __call__(self, epoch: int, optimizer: Any, loss_fns: List[List[Callable]], loss_weights: List[List[float]]) \
            -> Tuple[Any, List[List[Callable]], List[List[float]]]:
        """Return ``(optimizer, updated loss_fns, loss_weights)``.

        Fixed annotation: the first element is the optimizer (Any), not a float.
        """
        def update(loss: Any) -> Any:
            # Mutates the matching loss in place; others pass through untouched.
            if loss.__class__.__name__ == self.target_loss:
                loss.t *= self.mu
            return loss

        # Equivalent to the former nested map_ helpers, as an explicit comprehension.
        updated_fns: List[List[Callable]] = [[update(fn) for fn in fns] for fns in loss_fns]

        return optimizer, updated_fns, loss_weights