Commit

upgrade project
linjing-lab committed Oct 29, 2023
1 parent 15e7128 commit c61ed24
Showing 4 changed files with 14 additions and 14 deletions.
2 changes: 1 addition & 1 deletion released_box/README.md
@@ -123,7 +123,7 @@ save or load:
|data_loader|features: TabularData<br />labels: TabularData<br />ratio_set: Dict[str, int]={'train': 8, 'test': 1, 'val': 1}<br />worker_set: Dict[str, int]={'train': 8, 'test': 2, 'val': 1}<br />random_seed: Optional[int]=None|Using `ratio_set` and `worker_set` to Load the Multi-outputs Dataset with Numpy format into `torch.utils.data.DataLoader`.|
|set_freeze|require_grad: Dict[int, bool]|freeze some layers by given `requires_grad=False` if trained model will be loaded to execute experiments. |
|train_val|num_epochs: int=2<br />interval: int=100<br />tolerance: float=1e-3<br />patience: int=10<br />backend: str='threading'<br />n_jobs: int=-1<br />early_stop: bool=False|Using `num_epochs`, `tolerance`, `patience` to Control Training Process and `interval` to Adjust Print Interval with Accelerated Validation Combined with `backend` and `n_jobs`.|
-|test|sort_by: str='accuracy'<br />sort_state: bool=True|Sort Returned Test Result about Correct Classes with `sort_by` and `sort_state` Which Only Appears in Classification.|
+|test|/|Test Module Only Show with Loss at 3 Stages: Train, Test, Val|
|save|con: bool=True<br />dir: str='./model'|Save Trained Model Parameters with Model `state_dict` Control by `con`.|
|load|con: bool=True<br />dir: str='./model'|Load Trained Model Parameters with Model `state_dict` Control by `con`.|
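Read top to bottom, the table sketches the whole lifecycle: load data, optionally freeze layers, train with validation, test, then save or load the `state_dict`. A minimal usage sketch of that flow, with illustrative data; the constructor keywords are drawn from the parameters visible in `_utils.py`, so check the README of your installed version for the exact signature:

```python
import numpy
import perming  # assumes the released_box package is installed

# hypothetical data: 1000 samples, 8 features, 3 label classes
features = numpy.random.rand(1000, 8).astype(numpy.float32)
labels = numpy.random.randint(0, 3, (1000,)).astype(numpy.int64)

# argument names mirror parameters seen in _utils.py; treat this as a sketch
main = perming.Box(8, 3, (60,), batch_size=256, activation='relu',
                   solver='adam', learning_rate_init=0.01)
main.data_loader(features, labels,
                 ratio_set={'train': 8, 'test': 1, 'val': 1}, random_seed=0)
main.train_val(num_epochs=2, interval=100, tolerance=1e-3, patience=10,
               early_stop=True)
main.test()
main.save(con=True, dir='./model')
```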

2 changes: 1 addition & 1 deletion released_box/perming/__init__.py
@@ -27,4 +27,4 @@
    'Multi-outputs': Ranker
}

-__version__ = '1.8.1'
+__version__ = '1.8.2'
2 changes: 1 addition & 1 deletion released_box/perming/_typing.py
@@ -11,4 +11,4 @@
    Optional
)

-TabularData = numpy.ndarray
+TabularData = numpy.ndarray # used as medium for storage format
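The new comment documents the role of `TabularData`: a plain alias of `numpy.ndarray` used as the storage format that `data_loader` later converts into tensors. A hypothetical function annotated with the alias, just to show the intent:

```python
import numpy

TabularData = numpy.ndarray  # same alias as perming._typing

def head_tail_split(data: TabularData, ratio: float = 0.8) -> tuple:
    '''Split rows of a tabular array into two parts by ratio.'''
    cut = int(len(data) * ratio)
    return data[:cut], data[cut:]

head, tail = head_tail_split(numpy.arange(20).reshape(10, 2))
print(head.shape, tail.shape)  # (8, 2) (2, 2)
```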
22 changes: 11 additions & 11 deletions released_box/perming/_utils.py
@@ -29,7 +29,7 @@ class MLP(torch.nn.Module):
    :param input_: int, input dataset with features' dimension of tabular data is input_.
    :param num_classes: int, total number of correct label categories or multi-outputs.
    :param hidden_layer_sizes: Tuple[int], configure the length and size of each hidden layer.
-    :param activation:, activation configured by Box, Regressier, Binarier, Multipler, and Ranker.
+    :param activation:, activation configured by Box, Regressier, Binarier, Mutipler, and Ranker.
    '''
    def __init__(self, input_: int, num_classes: int, hidden_layer_sizes: Tuple[int], activation) -> None:
        super(MLP, self).__init__()
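The docstring fix replaces `Multipler` with `Mutipler`, the name the package actually exports. For orientation, `hidden_layer_sizes` configures a chain of `Linear` layers separated by the activation; a sketch of that construction pattern (not the repo's exact `MLP` body):

```python
import torch

def build_mlp(input_: int, num_classes: int, hidden_layer_sizes=(100,),
              activation=torch.nn.ReLU()):
    '''Chain Linear layers through the sizes in hidden_layer_sizes.'''
    layers, in_features = [], input_
    for size in hidden_layer_sizes:
        layers += [torch.nn.Linear(in_features, size), activation]
        in_features = size
    layers.append(torch.nn.Linear(in_features, num_classes))  # output head
    return torch.nn.Sequential(*layers)

model = build_mlp(8, 3, hidden_layer_sizes=(60, 30))
```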
@@ -83,18 +83,18 @@ def __init__(self,
        assert num_classes > 0, 'Supervised learning problems with num_classes ranges from (1, 2, 3, ...).'
        assert batch_size > 0, 'Batch size initialized with int value mostly 2^n(n=1, 2, 3), like 64, 128, 256.'
        assert learning_rate_init > 1e-6 and learning_rate_init < 1.0, 'Please assert learning rate initialized value in (1e-6, 1.0).'
-        self.input: int = input_
-        self.num_classes: int = num_classes
+        self.input: int = input_ # numbers of features described in one sample
+        self.num_classes: int = num_classes # numbers of prediction outputs
        self.activation = activation # function activate high-dimensional features
        self.device = device # device configuration
        self.criterion = criterion # criterion with classification & torch.long, regression & torch.float, and multi-outputs & roc
-        self.batch_size: int = batch_size
-        self.lr: float = learning_rate_init
+        self.batch_size: int = batch_size # batch size in train_loader, test_loader, val_loader
+        self.lr: float = learning_rate_init # initial learning rate in `self.solver` or `self.lr_scheduler`
        self.model = MLP(self.input, self.num_classes, hidden_layer_sizes, self.activation).to(self.device)
        if parse_torch_version(torch.__version__)[0] >= ['2', '0', '0']: # compile model
            self.model = torch.compile(self.model)
-        self.solver = self._solver(solver)
-        self.lr_scheduler = self._scheduler(lr_scheduler)
+        self.solver = self._solver(solver) # configuration of optimization algorithm
+        self.lr_scheduler = self._scheduler(lr_scheduler) # configuration compatible with `self.solver`

    def _solver(self, solver: str):
        '''
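In the constructor above, `torch.compile` is applied only when the parsed torch version reaches 2.0, keeping older installs working. A standalone sketch of that guard, assuming a plain numeric release string:

```python
import torch

def parse_release(version: str) -> list:
    '''Numeric release triple of a torch version, e.g. '2.1.0+cu118' -> [2, 1, 0].'''
    return [int(part) for part in version.split('+')[0].split('.')[:3]]

model = torch.nn.Linear(4, 2)
if parse_release(torch.__version__) >= [2, 0, 0]:  # torch.compile landed in 2.0
    model = torch.compile(model)
```

Comparing integer lists also sidesteps the lexicographic trap that string-list comparison carries once a component reaches two digits (`['10'] < ['2']` is True).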
@@ -138,7 +138,7 @@ def _scheduler(self, lr_scheduler: Optional[str]):
    def _val_acc(self, set: torch.Tensor):
        '''
        Accumulate Loss Value in Validation Stage.
-        :param set: torch.Tensor. unordered validation sets coming from val_dataloader.
+        :param set: torch.Tensor. unordered validation sets coming from val_loader.
        '''
        outputs_val = self.model(set[0].to(self.device)) # return value from cuda
        self.val_loss += self.criterion(outputs_val, set[1].to(self.device))
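`_val_acc` folds one batch from `val_loader` into the running `self.val_loss`; applied across all batches (parallelized via joblib with `backend`/`n_jobs`) this totals the validation loss. A single-threaded equivalent with illustrative tensors:

```python
import torch

model = torch.nn.Linear(8, 3)
criterion = torch.nn.CrossEntropyLoss()
val_batches = [(torch.randn(16, 8), torch.randint(0, 3, (16,))) for _ in range(4)]

val_loss = 0.0
with torch.no_grad():  # validation needs no gradient tracking
    for inputs, targets in val_batches:
        val_loss += criterion(model(inputs), targets).item()
print(val_loss / len(val_batches))  # mean validation loss
```

`.item()` here just keeps the sketch free of graph references; the repo accumulates the loss tensor directly.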
@@ -226,7 +226,7 @@ def train_val(self,
        Training and Validation with `train_loader` and `val_container`.
        :param num_epochs: int, training epochs for `self.model`. default: 2.
        :param interval: int, console output interval. default: 100.
-        :param tolerance: float, tolerance set to judge difference in val_loss. default: 1e-3
+        :param tolerance: float, tolerance set to judge difference in val_loss. default: 1e-3.
        :param patience: int, patience of no improvement waiting for training to stop. default: 10.
        :param backend: str, 'threading', 'multiprocessing', 'loky'. default: 'threading'.
        :param n_jobs: int, accelerate processing of validation. default: -1.
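`tolerance` and `patience` are the early-stopping knobs: with `early_stop=True`, training halts once the validation loss stops improving by more than `tolerance` for `patience` consecutive checks. A sketch of that bookkeeping (names assumed, not the repo's internals):

```python
def should_stop(val_losses, tolerance=1e-3, patience=10):
    '''True once the last `patience` losses show no improvement beyond `tolerance`.'''
    if len(val_losses) <= patience:
        return False
    best_before = min(val_losses[:-patience])
    recent_best = min(val_losses[-patience:])
    return best_before - recent_best < tolerance  # no meaningful progress lately

print(should_stop([0.9, 0.5, 0.4] + [0.4] * 10, patience=10))  # True
```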
@@ -290,8 +290,8 @@ def test(self,
             sort_by: str='accuracy',
             sort_state: bool=True):
        '''
-        Configured keywords only work when `not self.is_target_2d and num_classes >= 2`.
-        Produce `self.aver_acc != 0` and 'correct_class != None' in the above condition.
+        Configured keywords only work in sorting `self.correct_class.items()` when `self.is_task_c1d = True`.
+        Produce `self.aver_acc != None` and 'correct_class != None' in the above conditions.
        :param sort_by: str, 'accuracy', 'numbers', 'num-total'. default: 'accuracy'.
        :param sort_state: bool, whether to use descending order when sorting. default: True.
        '''
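The reworded docstring scopes the sorting keywords to 1-D classification tasks (`self.is_task_c1d`), where `correct_class` holds per-class results. A sketch of the described sort, with an assumed dictionary shape for illustration:

```python
# hypothetical per-class report: label -> (correct, total)
correct_class = {'cat': (45, 50), 'dog': (30, 50), 'bird': (48, 50)}

sort_by, sort_state = 'accuracy', True  # descending accuracy, as in test()
keyed = {
    'accuracy': lambda item: item[1][0] / item[1][1],
    'numbers': lambda item: item[1][0],
    'num-total': lambda item: item[1][1],
}[sort_by]
ranked = sorted(correct_class.items(), key=keyed, reverse=sort_state)
print(ranked)  # [('bird', (48, 50)), ('cat', (45, 50)), ('dog', (30, 50))]
```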