Fix to work PIECEWISE_CONSTANT, update requirements.txt and README #1393
kohya-ss committed Sep 11, 2024
1 parent fd68703 commit 6dbfd47
Showing 3 changed files with 54 additions and 25 deletions.
9 changes: 9 additions & 0 deletions README.md
@@ -139,6 +139,15 @@ The majority of scripts is licensed under ASL 2.0 (including codes from Diffuser

### Working in progress

- __important__ The dependent libraries are updated. Please see [Upgrade](#upgrade) and update the libraries.
- transformers, accelerate and huggingface_hub are updated.
- If you encounter any issues, please report them.

- The INVERSE_SQRT, COSINE_WITH_MIN_LR, and WARMUP_STABLE_DECAY learning rate schedules are now available in the transformers library. See PR [#1393](https://github.com/kohya-ss/sd-scripts/pull/1393) for details. Thanks to sdbds!
- See the [transformers documentation](https://huggingface.co/docs/transformers/v4.44.2/en/main_classes/optimizer_schedules#schedules) for details on each scheduler.
- `--lr_warmup_steps` and `--lr_decay_steps` can now be specified as a ratio of the number of training steps instead of an absolute step count, e.g. `--lr_warmup_steps=0.1` or `--lr_warmup_steps=10%` (see the usage sketch at the end of this README diff).

- When enlarging images in the script (when a training image is small and `bucket_no_upscale` is not specified), Pillow's resize with LANCZOS interpolation is now used instead of OpenCV's resize with LANCZOS4 interpolation. The quality of the enlarged images may be slightly improved. PR [#1426](https://github.com/kohya-ss/sd-scripts/pull/1426) Thanks to sdbds!

- Sample image generation during training now works on non-CUDA devices. PR [#1433](https://github.com/kohya-ss/sd-scripts/pull/1433) Thanks to millie-v!
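The following is a minimal usage sketch, not part of this commit, showing how one of the newly supported schedules (COSINE_WITH_MIN_LR) can be built through the same transformers lookup table that train_util.py uses below. The model, optimizer, step counts, and `min_lr_rate` value are illustrative placeholders.

```python
# Sketch: build one of the newly supported transformers schedules directly.
# The model, step counts, and ratios below are illustrative placeholders.
import torch
from transformers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

schedule_func = TYPE_TO_SCHEDULER_FUNCTION[SchedulerType.COSINE_WITH_MIN_LR]
scheduler = schedule_func(
    optimizer,
    num_warmup_steps=100,   # e.g. --lr_warmup_steps=0.1 with 1000 total steps
    num_training_steps=1000,
    min_lr_rate=0.1,        # corresponds to --lr_scheduler_min_lr_ratio=0.1
)

for _ in range(1000):
    optimizer.step()
    scheduler.step()
```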
66 changes: 43 additions & 23 deletions library/train_util.py
@@ -42,7 +42,10 @@
from torchvision import transforms
from transformers import CLIPTokenizer, CLIPTextModel, CLIPTextModelWithProjection
import transformers
from diffusers.optimization import SchedulerType as DiffusersSchedulerType, TYPE_TO_SCHEDULER_FUNCTION as DIFFUSERS_TYPE_TO_SCHEDULER_FUNCTION
from diffusers.optimization import (
SchedulerType as DiffusersSchedulerType,
TYPE_TO_SCHEDULER_FUNCTION as DIFFUSERS_TYPE_TO_SCHEDULER_FUNCTION,
)
from transformers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION
from diffusers import (
StableDiffusionPipeline,
@@ -2974,7 +2977,7 @@ def add_sd_models_arguments(parser: argparse.ArgumentParser):

def add_optimizer_arguments(parser: argparse.ArgumentParser):
def int_or_float(value):
if value.endswith('%'):
if value.endswith("%"):
try:
return float(value[:-1]) / 100.0
except ValueError:
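The hunk above shows only the first branch of `int_or_float`; a self-contained sketch of the behaviour it implies — a percentage string or fractional float becomes a ratio of the total training steps, a plain integer stays an absolute step count — might look like this (the error messages are invented for illustration):

```python
# Sketch of the argparse type implied by the hunk above: "10%" and "0.1" become
# ratios of the total training steps, "500" stays an absolute step count.
import argparse

def int_or_float(value):
    if value.endswith("%"):
        try:
            return float(value[:-1]) / 100.0  # "10%" -> 0.1
        except ValueError:
            raise argparse.ArgumentTypeError(f"{value} is not a valid percentage")
    try:
        return int(value)  # "500" -> 500
    except ValueError:
        try:
            return float(value)  # "0.1" -> 0.1
        except ValueError:
            raise argparse.ArgumentTypeError(f"{value} is not an int, float or percentage")

parser = argparse.ArgumentParser()
parser.add_argument("--lr_warmup_steps", type=int_or_float, default=0)
print(parser.parse_args(["--lr_warmup_steps=10%"]).lr_warmup_steps)  # 0.1
print(parser.parse_args(["--lr_warmup_steps=0.1"]).lr_warmup_steps)  # 0.1
print(parser.parse_args(["--lr_warmup_steps=500"]).lr_warmup_steps)  # 500
```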
@@ -3041,13 +3044,15 @@ def int_or_float(value):
"--lr_warmup_steps",
type=int_or_float,
default=0,
help="Int number of steps for the warmup in the lr scheduler (default is 0) or float with ratio of train steps / 学習率のスケジューラをウォームアップするステップ数(デフォルト0)",
help="Int number of steps for the warmup in the lr scheduler (default is 0) or float with ratio of train steps"
" / 学習率のスケジューラをウォームアップするステップ数(デフォルト0)、または学習ステップの比率(1未満のfloat値の場合)",
)
parser.add_argument(
"--lr_decay_steps",
type=int_or_float,
default=0,
help="Int number of steps for the decay in the lr scheduler (default is 0) or float with ratio of train steps",
help="Int number of steps for the decay in the lr scheduler (default is 0) or float (<1) with ratio of train steps"
" / 学習率のスケジューラを減衰させるステップ数(デフォルト0)、または学習ステップの比率(1未満のfloat値の場合)",
)
parser.add_argument(
"--lr_scheduler_num_cycles",
@@ -3071,13 +3076,16 @@ def int_or_float(value):
"--lr_scheduler_timescale",
type=int,
default=None,
help="Inverse sqrt timescale for inverse sqrt scheduler,defaults to `num_warmup_steps`",
help="Inverse sqrt timescale for inverse sqrt scheduler,defaults to `num_warmup_steps`"
" / 逆平方根スケジューラのタイムスケール、デフォルトは`num_warmup_steps`",
)
parser.add_argument(
"--lr_scheduler_min_lr_ratio",
type=float,
default=None,
help="The minimum learning rate as a ratio of the initial learning rate for cosine with min lr scheduler and warmup decay scheduler",
help="The minimum learning rate as a ratio of the initial learning rate for cosine with min lr scheduler and warmup decay scheduler"
" / 初期学習率の比率としての最小学習率を指定する、cosine with min lr と warmup decay スケジューラ で有効",
)


@@ -4327,8 +4335,12 @@ def get_scheduler_fix(args, optimizer: Optimizer, num_processes: int):
"""
name = args.lr_scheduler
num_training_steps = args.max_train_steps * num_processes # * args.gradient_accumulation_steps
num_warmup_steps: Optional[int] = int(args.lr_warmup_steps * num_training_steps) if isinstance(args.lr_warmup_steps, float) else args.lr_warmup_steps
num_decay_steps: Optional[int] = int(args.lr_decay_steps * num_training_steps) if isinstance(args.lr_decay_steps, float) else args.lr_decay_steps
num_warmup_steps: Optional[int] = (
int(args.lr_warmup_steps * num_training_steps) if isinstance(args.lr_warmup_steps, float) else args.lr_warmup_steps
)
num_decay_steps: Optional[int] = (
int(args.lr_decay_steps * num_training_steps) if isinstance(args.lr_decay_steps, float) else args.lr_decay_steps
)
num_stable_steps = num_training_steps - num_warmup_steps - num_decay_steps
num_cycles = args.lr_scheduler_num_cycles
power = args.lr_scheduler_power
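As a worked example of the ratio-to-steps conversion above (numbers are illustrative): with `max_train_steps=1000`, one process, `--lr_warmup_steps=0.1` and `--lr_decay_steps=0.2`, the ratios resolve to 100 warmup steps and 200 decay steps, leaving 700 stable steps; integer values are used as-is.

```python
# Worked example of the conversion above (illustrative values).
max_train_steps, num_processes = 1000, 1
lr_warmup_steps, lr_decay_steps = 0.1, 0.2  # floats are treated as ratios of total steps

num_training_steps = max_train_steps * num_processes              # 1000
num_warmup_steps = int(lr_warmup_steps * num_training_steps)      # 100
num_decay_steps = int(lr_decay_steps * num_training_steps)        # 200
num_stable_steps = num_training_steps - num_warmup_steps - num_decay_steps  # 700
print(num_warmup_steps, num_stable_steps, num_decay_steps)
```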
@@ -4369,15 +4381,17 @@ def wrap_check_needless_num_warmup_steps(return_vals):
# logger.info(f"adafactor scheduler init lr {initial_lr}")
return wrap_check_needless_num_warmup_steps(transformers.optimization.AdafactorSchedule(optimizer, initial_lr))

name = SchedulerType(name) or DiffusersSchedulerType(name)
schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] or DIFFUSERS_TYPE_TO_SCHEDULER_FUNCTION[name]
if name == DiffusersSchedulerType.PIECEWISE_CONSTANT.value:
name = DiffusersSchedulerType(name)
schedule_func = DIFFUSERS_TYPE_TO_SCHEDULER_FUNCTION[name]
return schedule_func(optimizer, **lr_scheduler_kwargs) # step_rules and last_epoch are given as kwargs

name = SchedulerType(name)
schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]

if name == SchedulerType.CONSTANT:
return wrap_check_needless_num_warmup_steps(schedule_func(optimizer, **lr_scheduler_kwargs))

if name == DiffusersSchedulerType.PIECEWISE_CONSTANT:
return schedule_func(optimizer, **lr_scheduler_kwargs) # step_rules and last_epoch are given as kwargs

# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
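The change above is the core of this fix: `SchedulerType(name)` in transformers raises a ValueError for `piecewise_constant`, which exists only in diffusers, so the old `SchedulerType(name) or DiffusersSchedulerType(name)` expression failed before the fallback was ever evaluated. A small sketch of the failure mode and the new dispatch order, using the same imports as the diff:

```python
# Sketch (not from the commit): why the old enum lookup failed and how the new
# dispatch handles the diffusers-only PIECEWISE_CONSTANT schedule first.
from transformers.optimization import SchedulerType
from diffusers.optimization import SchedulerType as DiffusersSchedulerType

name = "piecewise_constant"

try:
    SchedulerType(name)  # old code: raises here, so the `or` fallback is never reached
except ValueError:
    print("not a transformers schedule:", name)

if name == DiffusersSchedulerType.PIECEWISE_CONSTANT.value:
    print("dispatching to the diffusers scheduler function for", name)
```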
@@ -4408,11 +4422,11 @@ def wrap_check_needless_num_warmup_steps(return_vals):

if name == SchedulerType.COSINE_WITH_MIN_LR:
return schedule_func(
optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
num_cycles=num_cycles / 2,
min_lr_rate=min_lr_ratio,
min_lr_rate=min_lr_ratio,
**lr_scheduler_kwargs,
)

@@ -4421,16 +4435,22 @@ def wrap_check_needless_num_warmup_steps(return_vals):
raise ValueError(f"{name} requires `num_decay_steps`, please provide that argument.")
if name == SchedulerType.WARMUP_STABLE_DECAY:
return schedule_func(
optimizer,
num_warmup_steps=num_warmup_steps,
num_stable_steps=num_stable_steps,
num_decay_steps=num_decay_steps,
num_cycles=num_cycles / 2,
optimizer,
num_warmup_steps=num_warmup_steps,
num_stable_steps=num_stable_steps,
num_decay_steps=num_decay_steps,
num_cycles=num_cycles / 2,
min_lr_ratio=min_lr_ratio if min_lr_ratio is not None else 0.0,
**lr_scheduler_kwargs,
)

return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_decay_steps=num_decay_steps, **lr_scheduler_kwargs)
return schedule_func(
optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
num_decay_steps=num_decay_steps,
**lr_scheduler_kwargs,
)
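For the WARMUP_STABLE_DECAY branch above, a minimal sketch, not part of the commit, of resolving the schedule through the same lookup table; the dummy optimizer and the 100/700/200 split are illustrative placeholders.

```python
# Sketch: resolve WARMUP_STABLE_DECAY the same way get_scheduler_fix does.
# The optimizer and the 100/700/200 step split are illustrative placeholders.
import torch
from transformers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION

optimizer = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
schedule_func = TYPE_TO_SCHEDULER_FUNCTION[SchedulerType.WARMUP_STABLE_DECAY]

scheduler = schedule_func(
    optimizer,
    num_warmup_steps=100,   # ramp up
    num_stable_steps=700,   # hold the peak learning rate
    num_decay_steps=200,    # anneal at the end
    min_lr_ratio=0.0,       # corresponds to --lr_scheduler_min_lr_ratio
)

for _ in range(1000):
    optimizer.step()
    scheduler.step()
```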


def prepare_dataset_args(args: argparse.Namespace, support_metadata: bool):
4 changes: 2 additions & 2 deletions requirements.txt
@@ -1,5 +1,5 @@
accelerate==0.30.0
transformers==4.41.2
transformers==4.44.0
diffusers[torch]==0.25.0
ftfy==6.1.1
# albumentations==1.3.0
@@ -16,7 +16,7 @@ altair==4.2.2
easygui==0.98.3
toml==0.10.2
voluptuous==0.13.1
huggingface-hub==0.23.3
huggingface-hub==0.24.5
# for Image utils
imagesize==1.4.1
# for BLIP captioning
