diff --git a/llmfoundry/utils/builders.py b/llmfoundry/utils/builders.py
index 943fcdf7dc..16881b860a 100644
--- a/llmfoundry/utils/builders.py
+++ b/llmfoundry/utils/builders.py
@@ -29,7 +29,7 @@
 from llmfoundry.eval.datasets.in_context_learning_evaluation import \
     get_icl_task_dataloader
 from llmfoundry.tokenizers.tiktoken import TiktokenTokenizerWrapper
-from llmfoundry.utils.config_utils import to_dict_recursive
+from llmfoundry.utils.config_utils import to_dict_recursive, to_list_recursive
 from llmfoundry.utils.registry_utils import construct_from_registry
 from llmfoundry.utils.warnings import VersionedDeprecationWarning
 
@@ -484,7 +484,7 @@ def build_icl_evaluators(
         log.info(f'Extracting ICL task config from path: {icl_tasks}')
         with open(icl_tasks, 'r') as icl_f:
             icl_task_cfg = om.load(icl_f)
-        icl_tasks_list = to_dict_recursive(icl_task_cfg.icl_tasks)
+        icl_tasks_list = to_list_recursive(icl_task_cfg.icl_tasks)
     else:
         icl_tasks_list = icl_tasks
 
diff --git a/llmfoundry/utils/config_utils.py b/llmfoundry/utils/config_utils.py
index db29bcf5c5..97195a8a0f 100644
--- a/llmfoundry/utils/config_utils.py
+++ b/llmfoundry/utils/config_utils.py
@@ -118,7 +118,7 @@ def make_dataclass_and_log_config(
     for key in extraneous_keys:
         warnings.warn(
             f'Unused parameter {key} found in cfg. Please check your yaml to ensure this parameter is necessary. Interpreting {key} as a variable for logging purposes. Top-level variables are deprecated and will not be supported in future releases.',
-            DeprecationWarning)
+            category=DeprecationWarning)
         unstructured_config['variables'][key] = unstructured_config.pop(key)
 
     # Create copy of config for logging
diff --git a/scripts/eval/eval.py b/scripts/eval/eval.py
index 3c4f760cf2..2517fdfc24 100644
--- a/scripts/eval/eval.py
+++ b/scripts/eval/eval.py
@@ -255,7 +255,7 @@ def main(cfg: DictConfig) -> Tuple[List[Trainer], pd.DataFrame]:
     run_name = eval_config.run_name if eval_config.run_name else default_run_name
 
     reproducibility.seed_all(eval_config.seed)
-    dist.initialize_dist(get_device(None), timeout=eval_config.dist_timeout)
+    # dist.initialize_dist(get_device(None), timeout=eval_config.dist_timeout)
 
     logging.basicConfig(
         # Example of format string
diff --git a/scripts/eval/yamls/hf_eval.yaml b/scripts/eval/yamls/hf_eval.yaml
index 15e53edcaa..708c871d88 100644
--- a/scripts/eval/yamls/hf_eval.yaml
+++ b/scripts/eval/yamls/hf_eval.yaml
@@ -3,8 +3,11 @@ variables:
   model_name_or_path: EleutherAI/gpt-neo-125m
   # otherwise, write a block for each model you want to test in the `models` section
 
-precision: fp32
-max_seq_len: 1024
+  precision: fp32
+  max_seq_len: 1024
+
+precision: ${variables.precision}
+max_seq_len: ${variables.max_seq_len}
 
 models:
 -