fix(style): reformat with latest black version
dacorvo committed Jan 26, 2024
1 parent 4ea8215 commit 1b4a731
Showing 7 changed files with 24 additions and 21 deletions.
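
All seven diffs below are mechanical restyling with no behavior change, most likely from Black 24.1 (released January 2024, matching the commit date), whose stable style wraps multiline conditional expressions in parentheses, parenthesizes long right-hand sides of assignments instead of splitting the subscript on the left, collapses `...`-only function bodies onto the def line, and keeps a trailing comma after the last element of a multiline collection. Short illustrative sketches follow each hunk.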
6 changes: 3 additions & 3 deletions examples/language-modeling/run_clm.py
@@ -606,9 +606,9 @@ def compute_metrics(eval_preds):
         # Data collator will default to DataCollatorWithPadding, so we change it.
         data_collator=default_data_collator,
         compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None,
-        preprocess_logits_for_metrics=preprocess_logits_for_metrics
-        if training_args.do_eval and not is_torch_tpu_available()
-        else None,
+        preprocess_logits_for_metrics=(
+            preprocess_logits_for_metrics if training_args.do_eval and not is_torch_tpu_available() else None
+        ),
     )

     # Training
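This hunk, and the identical one in run_mlm.py below, shows the parenthesized-conditional change: instead of splitting the bare conditional across lines, Black now wraps it in parentheses. A minimal sketch for reproducing the new style, assuming black >= 24.1 is installed; the variable names and line length are illustrative only:

    # Illustrative sketch: run Black's Python API on a one-line conditional
    # assignment, with a short line length to force wrapping.
    import black

    src = "x = a_value if a_condition else None\n"
    print(black.format_str(src, mode=black.Mode(line_length=30)))
    # The 24.x stable style wraps the conditional in parentheses, roughly:
    # x = (
    #     a_value
    #     if a_condition
    #     else None
    # )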
6 changes: 3 additions & 3 deletions examples/language-modeling/run_mlm.py
@@ -631,9 +631,9 @@ def compute_metrics(eval_preds):
         tokenizer=tokenizer,
         data_collator=data_collator,
         compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None,
-        preprocess_logits_for_metrics=preprocess_logits_for_metrics
-        if training_args.do_eval and not is_torch_tpu_available()
-        else None,
+        preprocess_logits_for_metrics=(
+            preprocess_logits_for_metrics if training_args.do_eval and not is_torch_tpu_available() else None
+        ),
     )

     # Training
8 changes: 5 additions & 3 deletions optimum/neuron/distributed/base.py
@@ -397,9 +397,11 @@ def parallelize(
             tp_rank = get_tensor_model_parallel_rank()
             size_per_rank = parameter.size(partition_dim)
             slices = [
-                None
-                if idx != partition_dim
-                else (size_per_rank * tp_rank, size_per_rank * (tp_rank + 1))
+                (
+                    None
+                    if idx != partition_dim
+                    else (size_per_rank * tp_rank, size_per_rank * (tp_rank + 1))
+                )
                 for idx in range(num_dims)
             ]
         else:
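For context, the comprehension above builds one slice bound per tensor dimension: every axis is taken whole (None) except partition_dim, which is restricted to this tensor-parallel rank's contiguous shard. A self-contained sketch of the same logic; the helper name and the division by tp_size are hypothetical (the real code reads size_per_rank directly from the parameter):

    # Hypothetical standalone sketch of the slicing logic above.
    import torch

    def shard_bounds(parameter: torch.Tensor, partition_dim: int, tp_rank: int, tp_size: int):
        # Bounds are None for axes kept whole, (start, end) for the sharded axis.
        size_per_rank = parameter.size(partition_dim) // tp_size
        return [
            (
                None
                if idx != partition_dim
                else (size_per_rank * tp_rank, size_per_rank * (tp_rank + 1))
            )
            for idx in range(parameter.dim())
        ]

    weight = torch.randn(8, 4)
    bounds = shard_bounds(weight, partition_dim=0, tp_rank=1, tp_size=4)
    shard = weight[tuple(slice(*b) if b else slice(None) for b in bounds)]
    assert bounds == [(2, 4), None] and shard.shape == (2, 4)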
8 changes: 5 additions & 3 deletions optimum/neuron/distributed/checkpointing.py
@@ -46,9 +46,11 @@ def consolidate_tensor_parallel_checkpoints(checkpoint_dir: Union[str, Path]) ->
     parameter_names = state_dicts[0]["model"].keys()
     sharded_metadatas = {
         name: [
-            ParameterMetadata(**state_dict["sharded_metadata"][name])
-            if name in state_dict["sharded_metadata"]
-            else ParameterMetadata("tied")
+            (
+                ParameterMetadata(**state_dict["sharded_metadata"][name])
+                if name in state_dict["sharded_metadata"]
+                else ParameterMetadata("tied")
+            )
             for state_dict in state_dicts
         ]
         for name in parameter_names
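The comprehension above collects, for every parameter name, one metadata entry per tensor-parallel shard, falling back to a "tied" marker when a rank holds the parameter only as a tied copy. A standalone sketch of the gather-with-fallback pattern, with plain dicts standing in for ParameterMetadata and hypothetical contents:

    # Hypothetical sketch: plain dicts stand in for ParameterMetadata.
    state_dicts = [
        {"sharded_metadata": {"wte.weight": {"kind": "sharded"}}},
        {"sharded_metadata": {}},  # this rank only holds a tied copy
    ]
    parameter_names = ["wte.weight"]
    sharded_metadatas = {
        name: [
            (
                state_dict["sharded_metadata"][name]
                if name in state_dict["sharded_metadata"]
                else {"kind": "tied"}
            )
            for state_dict in state_dicts
        ]
        for name in parameter_names
    }
    print(sharded_metadatas)  # {'wte.weight': [{'kind': 'sharded'}, {'kind': 'tied'}]}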
3 changes: 1 addition & 2 deletions tests/distributed/distributed.py
@@ -97,8 +97,7 @@ class DistributedExec(ABC):
     exec_timeout: int = TEST_TIMEOUT

     @abstractmethod
-    def run(self):
-        ...
+    def run(self): ...

     def __call__(self, request=None):
         self._fixture_kwargs = self._get_fixture_kwargs(request, self.run)
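Beyond the style change (Black 24.x collapses bodies consisting solely of `...` onto the def line), the pattern itself is a standard abstract runner: concrete distributed tests subclass DistributedExec and implement run. A minimal sketch with hypothetical class names:

    # Minimal sketch of the abstract-runner pattern, in the compact
    # "dummy implementation" style that Black 24.x applies above.
    from abc import ABC, abstractmethod

    class Exec(ABC):
        @abstractmethod
        def run(self): ...

    class MyTest(Exec):
        def run(self):
            print("test body executes in each spawned worker")

    MyTest().run()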
12 changes: 6 additions & 6 deletions tests/test_cache_utils.py
@@ -83,9 +83,9 @@ def test_get_neuron_cache_path(self):
         assert get_neuron_cache_path() is None

         custom_cache_dir_name = Path("_this/is_/my1/2custom/cache/dir")
-        os.environ[
-            "NEURON_CC_FLAGS"
-        ] = f"--some --parameters --here --cache_dir={custom_cache_dir_name} --other --paremeters --here"
+        os.environ["NEURON_CC_FLAGS"] = (
+            f"--some --parameters --here --cache_dir={custom_cache_dir_name} --other --paremeters --here"
+        )

         self.assertEqual(get_neuron_cache_path(), custom_cache_dir_name)

@@ -99,9 +99,9 @@ def _test_set_neuron_cache_path(self, new_cache_path):
         set_neuron_cache_path(new_cache_path, ignore_no_cache=True)
         self.assertEqual(get_neuron_cache_path(), Path(new_cache_path))

-        os.environ[
-            "NEURON_CC_FLAGS"
-        ] = "--some --parameters --here --cache_dir=original_cache_dir --other --paremeters"
+        os.environ["NEURON_CC_FLAGS"] = (
+            "--some --parameters --here --cache_dir=original_cache_dir --other --paremeters"
+        )
         set_neuron_cache_path(new_cache_path)
         self.assertEqual(get_neuron_cache_path(), Path(new_cache_path))

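Both hunks in this file show the other assignment-related change: Black now parenthesizes a long right-hand side rather than splitting the subscript on the left. As for what the tests check, get_neuron_cache_path evidently recovers the --cache_dir=... token from NEURON_CC_FLAGS; a hedged sketch of how such parsing could look (not the library's actual implementation):

    # Hedged sketch only; not optimum-neuron's actual implementation.
    import os
    import re
    from pathlib import Path
    from typing import Optional

    def cache_path_from_flags() -> Optional[Path]:
        flags = os.environ.get("NEURON_CC_FLAGS", "")
        match = re.search(r"--cache_dir=(\S+)", flags)
        return Path(match.group(1)) if match else None

    os.environ["NEURON_CC_FLAGS"] = "--some --parameters --cache_dir=/tmp/neuron-cache --other"
    assert cache_path_from_flags() == Path("/tmp/neuron-cache")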
2 changes: 1 addition & 1 deletion tests/test_examples.py
@@ -193,7 +193,7 @@ class Coverage(str, Enum):
         TPSupport.FULL,
         Coverage.HIGH,
         {"num_hidden_layers": 2},
-    )
+    ),
     # "wav2vec2": "facebook/wav2vec2-base",
     # Remaning: XLNet, Deberta-v2, MPNet, CLIP
 }
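The one functional character here is the added trailing comma: Black leaves multiline collections with a trailing comma after the last element, so appending an entry later (say, re-enabling wav2vec2) yields a one-line diff. A tiny illustration with hypothetical contents:

    # With the trailing comma, adding another entry below touches one line only.
    MODELS_TO_TEST = {
        "bert": ("bert-base-uncased", {"num_hidden_layers": 2}),
    }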
