Move utils from models.utils to utils folder
nik-mosaic committed Aug 19, 2023
1 parent c724ba1, commit b4e4b86
Showing 6 changed files with 8 additions and 8 deletions.
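For downstream code, the change is a one-line import update; the exported names themselves are unchanged. A minimal before/after sketch in Python, using only the paths that appear in the diffs below:

# Before this commit:
# from llmfoundry.models.utils import (convert_and_save_ft_weights,
#                                      get_hf_tokenizer_from_composer_state_dict)

# After this commit:
from llmfoundry.utils import (convert_and_save_ft_weights,
                              get_hf_tokenizer_from_composer_state_dict)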
llmfoundry/models/utils/__init__.py (4 changes: 0 additions & 4 deletions)
@@ -3,8 +3,6 @@
 
 from llmfoundry.models.utils.adapt_tokenizer import (
     AutoTokenizerForMOD, adapt_tokenizer_for_denoising)
-from llmfoundry.models.utils.checkpoint_conversion_helpers import (
-    convert_and_save_ft_weights, get_hf_tokenizer_from_composer_state_dict)
 from llmfoundry.models.utils.hf_prefixlm_converter import (
     add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm)
 from llmfoundry.models.utils.meta_init_context import (init_empty_weights,
@@ -15,9 +13,7 @@
 __all__ = [
     'AutoTokenizerForMOD',
     'adapt_tokenizer_for_denoising',
-    'convert_and_save_ft_weights',
     'convert_hf_causal_lm_to_prefix_lm',
-    'get_hf_tokenizer_from_composer_state_dict',
     'init_empty_weights',
     'init_on_device',
     'add_bidirectional_mask_if_missing',
llmfoundry/utils/__init__.py (4 changes: 4 additions & 0 deletions)
@@ -6,6 +6,8 @@
                                        build_icl_evaluators, build_logger,
                                        build_optimizer, build_scheduler,
                                        build_tokenizer)
+from llmfoundry.utils.checkpoint_conversion_helpers import (
+    convert_and_save_ft_weights, get_hf_tokenizer_from_composer_state_dict)
 from llmfoundry.utils.config_utils import (calculate_batch_size_info,
                                            log_config, pop_config,
                                            update_batch_size_info)
@@ -23,6 +25,8 @@
     'build_icl_evaluators',
     'build_tokenizer',
     'calculate_batch_size_info',
+    'convert_and_save_ft_weights',
+    'get_hf_tokenizer_from_composer_state_dict',
     'update_batch_size_info',
     'log_config',
     'pop_config',
scripts/inference/convert_composer_mpt_to_ft.py (4 changes: 2 additions & 2 deletions)
@@ -14,8 +14,8 @@
 from composer.utils import get_file, safe_torch_load
 from transformers import PreTrainedTokenizer
 
-from llmfoundry.models.utils import (convert_and_save_ft_weights,
-                                     get_hf_tokenizer_from_composer_state_dict)
+from llmfoundry.utils import (convert_and_save_ft_weights,
+                              get_hf_tokenizer_from_composer_state_dict)
 
 
 def save_ft_config(composer_config: dict,
scripts/inference/convert_composer_to_hf.py (2 changes: 1 addition & 1 deletion)
@@ -15,7 +15,7 @@
 from transformers import AutoConfig, PretrainedConfig, PreTrainedTokenizerBase
 
 from llmfoundry import MPTConfig, MPTForCausalLM
-from llmfoundry.models.utils import get_hf_tokenizer_from_composer_state_dict
+from llmfoundry.utils import get_hf_tokenizer_from_composer_state_dict
 from llmfoundry.utils.huggingface_hub_utils import \
     edit_files_for_hf_compatibility
 
scripts/inference/convert_hf_mpt_to_ft.py (2 changes: 1 addition & 1 deletion)
@@ -27,7 +27,7 @@
 
 import transformers
 
-from llmfoundry.models.utils import convert_and_save_ft_weights
+from llmfoundry.utils import convert_and_save_ft_weights
 
 
 def convert_mpt_to_ft(model_name_or_path: str,
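For scripts that must run against llm-foundry versions on either side of this commit, a small import shim can bridge the two locations. This try/except pattern is a suggested sketch, not part of the commit; both import paths are taken from the diffs above:

# Hedged sketch: prefer the post-commit location and fall back to the
# pre-commit one if it is absent.
try:
    from llmfoundry.utils import convert_and_save_ft_weights  # new location
except ImportError:
    from llmfoundry.models.utils import convert_and_save_ft_weights  # old location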
