Support Remote and HF promptfiles in hf_generate script #786

Open · wants to merge 13 commits into main
llmfoundry/utils/prompt_files.py (83 additions, 1 deletion)

@@ -4,12 +4,16 @@
 import os
 from typing import List, Optional
 
+from composer.utils import get_file, parse_uri
+from datasets import load_dataset
+
 PROMPTFILE_PREFIX = 'file::'
+PROMPTDATASET_PREFIX = 'hf://'
 
 
 def load_prompts(prompts: List[str],
                  prompt_delimiter: Optional[str] = None) -> List[str]:
-    """Loads a set of prompts, both free text and from file.
+    """Loads a set of prompts, both free text and from file or HF dataset.
 
     Args:
         prompts (List[str]): List of free text prompts and prompt files
@@ -21,9 +25,16 @@ def load_prompts(prompts: List[str],
     """
     prompt_strings = []
     for prompt in prompts:
+        backend, _, _ = parse_uri(prompt)
         if prompt.startswith(PROMPTFILE_PREFIX):
            prompts = load_prompts_from_file(prompt, prompt_delimiter)
            prompt_strings.extend(prompts)
+        elif prompt.startswith(PROMPTDATASET_PREFIX):
+            prompts = load_prompts_from_dataset(prompt, prompt_delimiter)
+            prompt_strings.extend(prompts)
+        elif backend not in ['', None]:
+            prompts = load_prompts_from_remote(prompt, prompt_delimiter)
+            prompt_strings.extend(prompts)
         else:
             prompt_strings.append(prompt)
     return prompt_strings
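For illustration, a minimal sketch of how the three prompt sources dispatch through `load_prompts` (the file path, bucket, and dataset names are placeholders):

```python
from llmfoundry.utils.prompt_files import load_prompts

# Free text passes through unchanged; 'file::' entries and object-store URIs
# are read and split on the delimiter.
prompts = load_prompts(
    [
        'My name is',                  # free text, appended as-is
        'file::/path/to/prompts.txt',  # local file (placeholder path)
        's3://my-bucket/prompts.txt',  # object store, fetched via get_file
    ],
    prompt_delimiter='\n',
)

# For an 'hf://' dataset the delimiter argument is reused as the column name,
# so leave it unset to read the default 'prompt' column.
dataset_prompts = load_prompts(['hf://org/dataset'])
```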
@@ -45,6 +56,7 @@ def load_prompts_from_file(prompt_path: str,
         raise ValueError(f'prompt_path_str must start with {PROMPTFILE_PREFIX}')
 
     _, prompt_file_path = prompt_path.split(PROMPTFILE_PREFIX, maxsplit=1)
+    # local file
     prompt_file_path = os.path.expanduser(prompt_file_path)
     if not os.path.isfile(prompt_file_path):
         raise FileNotFoundError(
@@ -56,3 +68,73 @@
     if prompt_delimiter is None:
         return [prompt_string]
     return [i for i in prompt_string.split(prompt_delimiter) if i]
+
+
+def load_prompts_from_remote(prompt_path: str,
+                             prompt_delimiter: Optional[str] = None
+                            ) -> List[str]:
+    """Load a set of prompts from object storage.
+
+    Args:
+        prompt_path (str): Path for text file
+        prompt_delimiter (Optional str): Delimiter for text file
+            If not provided, assumes the prompt file is a single prompt (non-delimited)
+
+    Returns:
+        List of prompt string(s)
+    """
+    backend, _, _ = parse_uri(prompt_path)
+    if backend in ['', None]:
+        raise ValueError(
+            f'prompt_path_str must start with s3:// etc if using object storage'
+        )
+
+    local_path = prompt_path.split('/')[-1]
+    get_file(path=prompt_path, destination=local_path, overwrite=True)
Collaborator (review comment): you probably want to make local_path a tmp location?
+    with open(local_path, 'r') as f:
+        prompt_string = f.read()
+
+    if prompt_delimiter is None:
+        return [prompt_string]
+    return [i for i in prompt_string.split(prompt_delimiter) if i]
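One way to address the reviewer's point, as a sketch only (the helper name is hypothetical, not part of this PR): fetch into a temporary directory so downloads cannot collide with files in the working directory.

```python
import os
import tempfile

from composer.utils import get_file


def _read_remote_prompt_file(prompt_path: str) -> str:
    """Hypothetical variant of the download step: fetch into a temp dir
    so the file never lands in the current working directory."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        local_path = os.path.join(tmp_dir, os.path.basename(prompt_path))
        get_file(path=prompt_path, destination=local_path, overwrite=True)
        with open(local_path, 'r') as f:
            return f.read()
```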


Collaborator (review comment on the prompt_delimiter parameter): I'd prefer you just add a new argument to the script for the dataset column name instead of overloading the delimiter param

+def load_prompts_from_dataset(dataset_path: str,
+                              prompt_delimiter: Optional[str] = None
+                             ) -> List[str]:
+    """Load a set of prompts from a huggingface dataset.
+
+    Args:
+        dataset_path (str): Path for dataset
+        prompt_delimiter (Optional str): We misuse the delimiter here to specify
+            the name of the prompt column in the dataset. If not provided, assumes
+            the prompt column is named 'prompt'.
+
+    Returns:
+        List of prompt string(s)
+    """
+    if not dataset_path.startswith(PROMPTDATASET_PREFIX):
+        raise ValueError(f'dataset_path must start with {PROMPTDATASET_PREFIX}')
+
+    _, dataset_path = dataset_path.split(PROMPTDATASET_PREFIX, maxsplit=1)
+
+    try:
+        dataset = load_dataset(dataset_path, token=True)
+    except:
+        dataset = load_dataset(dataset_path)
+
+    prompt_strings = []
+    if prompt_delimiter is None:
+        prompt_delimiter = 'prompt'
+    try:
+        ds = dataset['train']
+    except:
+        ds = dataset
+
+    if prompt_delimiter not in ds.column_names:
+        raise ValueError(f'{prompt_delimiter} not in dataset columns.')
+    for prompt in ds[prompt_delimiter]:
+        prompt_strings.append(prompt)
+
+    return prompt_strings
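A usage sketch (dataset name and column are placeholders): with no delimiter the loader pulls the `prompt` column from the `train` split; passing a delimiter reinterprets it as a column name, which is the overloading the comment above objects to.

```python
from llmfoundry.utils.prompt_files import load_prompts_from_dataset

# Default: read the 'prompt' column of the train split.
prompts = load_prompts_from_dataset('hf://org/dataset')

# Here the delimiter argument actually names the column to read.
prompts = load_prompts_from_dataset('hf://org/dataset',
                                    prompt_delimiter='instruction')
```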
scripts/inference/hf_generate.py (7 additions, 2 deletions)

@@ -64,7 +64,9 @@ def parse_args() -> Namespace:
             'This is an explanation of deep learning to a five year old. Deep learning is',
         ],
         help='List of generation prompts or list of delimited files. Use syntax ' +\
-        '"file::/path/to/prompt.txt" to load a prompt(s) contained in a txt file.'
+        f'"{utils.PROMPTFILE_PREFIX}/path/to/prompt.txt" to load prompt(s) contained in a txt file. ' +\
+        f'You can load a file from an object store without a prefix, e.g. "s3://bucket/path/to/prompt.txt". ' +\
+        f'\nUse syntax "{utils.PROMPTDATASET_PREFIX}org/dataset" to load prompts from a HF dataset. '
     )
     parser.add_argument(
         '--prompt-delimiter',
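For reference, invocations implied by the new help text (the bucket and dataset names are placeholders, and other required script arguments are elided):

```sh
# Local delimited prompt file
python scripts/inference/hf_generate.py \
    --prompts "file::/path/to/prompt.txt" --prompt-delimiter ","

# Prompt file in object storage (bucket is a placeholder)
python scripts/inference/hf_generate.py \
    --prompts "s3://bucket/path/to/prompt.txt"

# Prompts from a HF dataset
python scripts/inference/hf_generate.py --prompts "hf://org/dataset"
```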
@@ -277,7 +279,10 @@ def _generate(encoded_inp: Dict[str, torch.Tensor]):
     print(f'\nTokenizing prompts...')
     maybe_synchronize()
     encode_start = time.time()
-    encoded_inp = tokenizer(batch, return_tensors='pt', padding=True)
+    encoded_inp = tokenizer(batch,
+                            return_tensors='pt',
+                            padding=True,
+                            truncation=True)
     for key, value in encoded_inp.items():
         encoded_inp[key] = value.to(model.device)
     maybe_synchronize()
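Note that `truncation=True` with no explicit `max_length` truncates to the tokenizer's `model_max_length`. A sketch of the more explicit form, should the ceiling need to differ (the 2048 value is illustrative, not something this PR sets):

```python
# Same call with an explicit ceiling; without max_length, truncation=True
# falls back to tokenizer.model_max_length.
encoded_inp = tokenizer(batch,
                        return_tensors='pt',
                        padding=True,
                        truncation=True,
                        max_length=2048)
```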