From 6d6e3063dead7b6b130d9ef21674fd2c659a0127 Mon Sep 17 00:00:00 2001 From: Russell Kaplan Date: Mon, 17 Jul 2023 20:58:22 -0700 Subject: [PATCH] Deployed 2561392 with MkDocs version: 1.4.3 --- api/data_types/index.html | 286 +++++++++++++++++++--------------- api/error_handling/index.html | 35 ++--- api/python_client/index.html | 112 ++++++++++--- index.html | 13 +- search/search_index.json | 2 +- sitemap.xml.gz | Bin 127 -> 127 bytes 6 files changed, 278 insertions(+), 170 deletions(-) diff --git a/api/data_types/index.html b/api/data_types/index.html index 578a0e549..850b140db 100644 --- a/api/data_types/index.html +++ b/api/data_types/index.html @@ -1275,21 +1275,20 @@

🐍 Python Client Data Type Reference

- CompletionOutput + CompletionOutput

-

- Bases: BaseModel

+

+ Bases: BaseModel

Represents the output of a completion request to a model.

-
@@ -1305,7 +1304,7 @@

- text + text @@ -1328,7 +1327,7 @@

- num_completion_tokens + num_completion_tokens @@ -1361,16 +1360,15 @@

- CompletionStreamOutput + CompletionStreamOutput

-

- Bases: BaseModel

- +

+ Bases: BaseModel

@@ -1389,7 +1387,7 @@

- text + text @@ -1412,7 +1410,7 @@

- finished + finished @@ -1435,12 +1433,12 @@

- num_completion_tokens + num_completion_tokens - instance-attribute class-attribute + instance-attribute

@@ -1469,21 +1467,20 @@

- CompletionSyncResponse + CompletionSyncResponse

-

- Bases: BaseModel

+

+ Bases: BaseModel

Response object for a synchronous prompt completion.

-
@@ -1499,7 +1496,7 @@

- output + output @@ -1522,7 +1519,7 @@

- request_id + request_id @@ -1555,21 +1552,20 @@

- CompletionStreamResponse + CompletionStreamResponse

-

- Bases: BaseModel

+

+ Bases: BaseModel

Response object for a stream prompt completion task.

-
@@ -1585,12 +1581,12 @@

- output + output - instance-attribute class-attribute + instance-attribute

@@ -1609,7 +1605,7 @@

- request_id + request_id @@ -1642,21 +1638,20 @@

- CreateFineTuneRequest + CreateFineTuneRequest

-

- Bases: BaseModel

+

+ Bases: BaseModel

Request object for creating a FineTune.

-
@@ -1672,16 +1667,19 @@

- model + model - instance-attribute class-attribute + instance-attribute

-
model: str = Field(Ellipsis, description='Identifier of base model to train from.')
+
model: str = Field(
+    ...,
+    description="Identifier of base model to train from.",
+)
 
@@ -1696,16 +1694,19 @@

- training_file + training_file - instance-attribute class-attribute + instance-attribute

-
training_file: str = Field(Ellipsis, description="Path to file of training dataset. Dataset must be a csv with columns 'prompt' and 'response'.")
+
training_file: str = Field(
+    ...,
+    description="Path to file of training dataset. Dataset must be a csv with columns 'prompt' and 'response'.",
+)
 
@@ -1720,16 +1721,19 @@

- validation_file + validation_file - instance-attribute class-attribute + instance-attribute

-
validation_file: Optional[str] = Field(default=None, description='Path to file of validation dataset. Has the same format as training_file. If not provided, we will generate a split from the training dataset.')
+
validation_file: Optional[str] = Field(
+    default=None,
+    description="Path to file of validation dataset. Has the same format as training_file. If not provided, we will generate a split from the training dataset.",
+)
 
@@ -1744,16 +1748,19 @@

- hyperparameters + hyperparameters - instance-attribute class-attribute + instance-attribute

-
hyperparameters: Optional[Dict[str, Any]] = Field(default=None, description='Hyperparameters to pass in to training job.')
+
hyperparameters: Optional[Dict[str, Any]] = Field(
+    default=None,
+    description="Hyperparameters to pass in to training job.",
+)
 
@@ -1768,16 +1775,19 @@

- suffix + suffix - instance-attribute class-attribute + instance-attribute

-
suffix: Optional[str] = Field(default=None, description='Optional user-provided identifier suffix for the fine-tuned model.')
+
suffix: Optional[str] = Field(
+    default=None,
+    description="Optional user-provided identifier suffix for the fine-tuned model.",
+)
 
@@ -1802,21 +1812,20 @@

- CreateFineTuneResponse + CreateFineTuneResponse

-

- Bases: BaseModel

+

+ Bases: BaseModel

Response object for creating a FineTune.

-
@@ -1832,16 +1841,18 @@

- fine_tune_id + fine_tune_id - instance-attribute class-attribute + instance-attribute

-
fine_tune_id: str = Field(Ellipsis, description='ID of the created fine-tuning job.')
+
fine_tune_id: str = Field(
+    ..., description="ID of the created fine-tuning job."
+)
 
@@ -1866,21 +1877,20 @@

- GetFineTuneResponse + GetFineTuneResponse

-

- Bases: BaseModel

+

+ Bases: BaseModel

Response object for retrieving a FineTune.

-
@@ -1896,16 +1906,18 @@

- fine_tune_id + fine_tune_id - instance-attribute class-attribute + instance-attribute

-
fine_tune_id: str = Field(Ellipsis, description='ID of the requested job.')
+
fine_tune_id: str = Field(
+    ..., description="ID of the requested job."
+)
 
@@ -1920,16 +1932,18 @@

- status + status - instance-attribute class-attribute + instance-attribute

-
status: BatchJobStatus = Field(Ellipsis, description='Status of the requested job.')
+
status: BatchJobStatus = Field(
+    ..., description="Status of the requested job."
+)
 
@@ -1954,21 +1968,20 @@

- ListFineTunesResponse + ListFineTunesResponse

-

- Bases: BaseModel

+

+ Bases: BaseModel

Response object for listing FineTunes.

-
@@ -1984,16 +1997,19 @@

- jobs + jobs - instance-attribute class-attribute + instance-attribute

-
jobs: List[GetFineTuneResponse] = Field(Ellipsis, description='List of fine-tuning jobs and their statuses.')
+
jobs: List[GetFineTuneResponse] = Field(
+    ...,
+    description="List of fine-tuning jobs and their statuses.",
+)
 
@@ -2018,21 +2034,20 @@

- CancelFineTuneResponse + CancelFineTuneResponse

-

- Bases: BaseModel

+

+ Bases: BaseModel

Response object for cancelling a FineTune.

-
@@ -2048,16 +2063,18 @@

- success + success - instance-attribute class-attribute + instance-attribute

-
success: bool = Field(Ellipsis, description='Whether cancellation was successful.')
+
success: bool = Field(
+    ..., description="Whether cancellation was successful."
+)
 
@@ -2082,21 +2099,20 @@

- GetLLMEndpointResponse + GetLLMEndpointResponse

-

- Bases: BaseModel

+

+ Bases: BaseModel

Response object for retrieving a Model.

-
@@ -2112,16 +2128,18 @@

- name + name - instance-attribute class-attribute + instance-attribute

-
name: str = Field(description='The name of the model. Use this for making inference requests to the model.')
+
name: str = Field(
+    description="The name of the model. Use this for making inference requests to the model."
+)
 
@@ -2136,16 +2154,18 @@

- source + source - instance-attribute class-attribute + instance-attribute

-
source: LLMSource = Field(description='The source of the model, e.g. Hugging Face.')
+
source: LLMSource = Field(
+    description="The source of the model, e.g. Hugging Face."
+)
 
@@ -2160,16 +2180,18 @@

- inference_framework + inference_framework - instance-attribute class-attribute + instance-attribute

-
inference_framework: LLMInferenceFramework = Field(description='The inference framework used by the model.')
+
inference_framework: LLMInferenceFramework = Field(
+    description="The inference framework used by the model."
+)
 
@@ -2184,16 +2206,19 @@

- id + id - instance-attribute class-attribute + instance-attribute

-
id: Optional[str] = Field(default=None, description='(For self-hosted users) The autogenerated ID of the model.')
+
id: Optional[str] = Field(
+    default=None,
+    description="(For self-hosted users) The autogenerated ID of the model.",
+)
 
@@ -2208,16 +2233,19 @@

- model_name + model_name - instance-attribute class-attribute + instance-attribute

-
model_name: Optional[str] = Field(default=None, description='(For self-hosted users) For fine-tuned models, the base model. For base models, this will be the same as `name`.')
+
model_name: Optional[str] = Field(
+    default=None,
+    description="(For self-hosted users) For fine-tuned models, the base model. For base models, this will be the same as `name`.",
+)
 
@@ -2232,16 +2260,19 @@

- inference_framework_tag + inference_framework_tag - instance-attribute class-attribute + instance-attribute

-
inference_framework_tag: Optional[str] = Field(default=None, description='(For self-hosted users) The Docker image tag used to run the model.')
+
inference_framework_tag: Optional[str] = Field(
+    default=None,
+    description="(For self-hosted users) The Docker image tag used to run the model.",
+)
 
@@ -2256,16 +2287,19 @@

- num_shards + num_shards - instance-attribute class-attribute + instance-attribute

-
num_shards: Optional[int] = Field(default=None, description='(For self-hosted users) The number of shards.')
+
num_shards: Optional[int] = Field(
+    default=None,
+    description="(For self-hosted users) The number of shards.",
+)
 
@@ -2280,16 +2314,19 @@

- quantize + quantize - instance-attribute class-attribute + instance-attribute

-
quantize: Optional[Quantization] = Field(default=None, description='(For self-hosted users) The quantization method.')
+
quantize: Optional[Quantization] = Field(
+    default=None,
+    description="(For self-hosted users) The quantization method.",
+)
 
@@ -2304,16 +2341,19 @@

- spec + spec - instance-attribute class-attribute + instance-attribute

-
spec: Optional[GetModelEndpointResponse] = Field(default=None, description='(For self-hosted users) Model endpoint details.')
+
spec: Optional[GetModelEndpointResponse] = Field(
+    default=None,
+    description="(For self-hosted users) Model endpoint details.",
+)
 
@@ -2338,21 +2378,20 @@

- ListLLMEndpointsResponse + ListLLMEndpointsResponse

-

- Bases: BaseModel

+

+ Bases: BaseModel

Response object for listing Models.

-
@@ -2368,16 +2407,18 @@

- model_endpoints + model_endpoints - instance-attribute class-attribute + instance-attribute

-
model_endpoints: List[GetLLMEndpointResponse] = Field(Ellipsis, description='The list of models.')
+
model_endpoints: List[GetLLMEndpointResponse] = Field(
+    ..., description="The list of models."
+)
 
@@ -2402,21 +2443,20 @@

- DeleteLLMEndpointResponse + DeleteLLMEndpointResponse

-

- Bases: BaseModel

+

+ Bases: BaseModel

Response object for deleting a Model.

-
@@ -2432,16 +2472,18 @@

- deleted + deleted - instance-attribute class-attribute + instance-attribute

-
deleted: bool = Field(Ellipsis, description='Whether deletion was successful.')
+
deleted: bool = Field(
+    ..., description="Whether deletion was successful."
+)
 
diff --git a/api/error_handling/index.html b/api/error_handling/index.html index dcce53c52..e9e745289 100644 --- a/api/error_handling/index.html +++ b/api/error_handling/index.html @@ -619,7 +619,7 @@

Error handling - BadRequestError + BadRequestError

@@ -627,15 +627,14 @@

-

- Bases: Exception

+

+ Bases: Exception

Corresponds to HTTP 400. Indicates that the request had inputs that were invalid. The user should not attempt to retry the request without changing the inputs.

-
@@ -661,7 +660,7 @@

- UnauthorizedError + UnauthorizedError

@@ -669,14 +668,13 @@

-

- Bases: Exception

+

+ Bases: Exception

Corresponds to HTTP 401. This means that no valid API key was provided.

-
@@ -702,7 +700,7 @@

- NotFoundError + NotFoundError

@@ -710,8 +708,8 @@

-

- Bases: Exception

+

+ Bases: Exception

Corresponds to HTTP 404. This means that the resource (e.g. a Model, FineTune, etc.) could not be found. @@ -720,7 +718,6 @@

the user does not have access to.

-
@@ -746,7 +743,7 @@

- RateLimitExceededError + RateLimitExceededError

@@ -754,14 +751,13 @@

-

- Bases: Exception

+

+ Bases: Exception

Corresponds to HTTP 429. Too many requests hit the API too quickly. We recommend an exponential backoff for retries.

-
@@ -787,7 +783,7 @@

- ServerError + ServerError

@@ -795,14 +791,13 @@

-

- Bases: Exception

+

+ Bases: Exception

Corresponds to HTTP 5xx errors on the server.

-
diff --git a/api/python_client/index.html b/api/python_client/index.html index d291398d2..0efcb2293 100644 --- a/api/python_client/index.html +++ b/api/python_client/index.html @@ -747,15 +747,15 @@

🐍 Python Client API Reference - Completion + Completion

-

- Bases: APIEngine

+

+ Bases: APIEngine

Completion API. This API is used to generate text completions.

@@ -767,7 +767,6 @@

For each of these modes, you can also choose whether to stream token responses or not.

-
@@ -780,12 +779,13 @@

+

- create + create @@ -793,7 +793,17 @@

-
create(model: str, prompt: str, max_new_tokens: int = 20, temperature: float = 0.2, timeout: int = 10, stream: bool = False) -> Union[CompletionSyncResponse, Iterator[CompletionStreamResponse]]
+
create(
+    model: str,
+    prompt: str,
+    max_new_tokens: int = 20,
+    temperature: float = 0.2,
+    timeout: int = 10,
+    stream: bool = False,
+) -> Union[
+    CompletionSyncResponse,
+    Iterator[CompletionStreamResponse],
+]
 
@@ -808,6 +818,8 @@

CompletionStreamV1Response with request_id and outputs fields.

+ +

Parameters:

@@ -911,6 +923,8 @@

+ +

Returns:

@@ -997,12 +1011,13 @@

+

- acreate + acreate @@ -1011,7 +1026,17 @@

-
acreate(model: str, prompt: str, max_new_tokens: int = 20, temperature: float = 0.2, timeout: int = 10, stream: bool = False) -> Union[CompletionSyncResponse, AsyncIterable[CompletionStreamResponse]]
+
acreate(
+    model: str,
+    prompt: str,
+    max_new_tokens: int = 20,
+    temperature: float = 0.2,
+    timeout: int = 10,
+    stream: bool = False,
+) -> Union[
+    CompletionSyncResponse,
+    AsyncIterable[CompletionStreamResponse],
+]
 
@@ -1026,6 +1051,8 @@

CompletionStreamV1Response with request_id and outputs fields.

+ +

Parameters:

@@ -1129,6 +1156,8 @@

+ +

Returns:

@@ -1235,15 +1264,15 @@

- FineTune + FineTune

-

- Bases: APIEngine

+

+ Bases: APIEngine

FineTune API. This API is used to fine-tune models.

@@ -1252,7 +1281,6 @@

Creating a fine-tune will end with the creation of a Model, which you can view using Model.get(model_name) or delete using Model.delete(model_name).

-
@@ -1265,12 +1293,13 @@

+

- create + create @@ -1278,7 +1307,13 @@

-
create(model: str, training_file: str, validation_file: Optional[str] = None, hyperparameters: Optional[Dict[str, str]] = None, suffix: Optional[str] = None) -> CreateFineTuneResponse
+
create(
+    model: str,
+    training_file: str,
+    validation_file: Optional[str] = None,
+    hyperparameters: Optional[Dict[str, str]] = None,
+    suffix: Optional[str] = None,
+) -> CreateFineTuneResponse
 
@@ -1291,6 +1326,8 @@

currently supported. At least 200 rows of data is recommended to start to see benefits from fine-tuning.

+ +

Parameters:

@@ -1383,6 +1420,8 @@

+ +

Returns:

@@ -1455,12 +1494,13 @@

+

- get + get @@ -1481,6 +1521,8 @@

object with the id and status (PENDING, STARTED, UNDEFINED, FAILURE or SUCCESS).

+ +

Parameters:

@@ -1509,6 +1551,8 @@

+ +

Returns:

@@ -1556,12 +1600,13 @@

+

- list + list @@ -1579,6 +1624,8 @@

It returns a list of pairs of fine_tune_id and status for all existing jobs.

+ +

Returns:

@@ -1631,12 +1678,13 @@

+

- cancel + cancel @@ -1655,6 +1703,8 @@

and returns a response object which has a success field confirming if the cancellation was successful.

+ +

Parameters:

@@ -1683,6 +1733,8 @@

+ +

Returns:

@@ -1739,15 +1791,15 @@

- Model + Model

-

- Bases: APIEngine

+

+ Bases: APIEngine

Model API. This API is used to get, list, and delete models. Models include both base @@ -1756,7 +1808,6 @@

See Model Zoo for the list of publicly available base models.

-
@@ -1769,12 +1820,13 @@

+

- get + get @@ -1795,6 +1847,8 @@

GetLLMEndpointResponse object.

+ +

Parameters:

@@ -1823,6 +1877,8 @@

+ +

Returns:

@@ -1875,12 +1931,13 @@

+

- list + list @@ -1900,6 +1957,8 @@

GetLLMEndpointResponse objects for all models. The most important field is the model name.

+ +

Returns:

@@ -1988,12 +2047,13 @@

+

- delete + delete @@ -2013,6 +2073,8 @@

was successful. If called on a base model included with LLM Engine, an error will be thrown.

+ +

Parameters:

@@ -2041,6 +2103,8 @@

+ +

Returns:

diff --git a/index.html b/index.html index aff738222..38519954e 100644 --- a/index.html +++ b/index.html @@ -607,6 +607,11 @@

LLM EngineQuick Install

+
+
+
+
+
pip install scale-llm-engine
 

About

@@ -620,9 +625,11 @@

About& Scale's hosted infrastructure or do it in your own cloud infrastructure using Kubernetes.

Key Features

-

Ready-to-use APIs for your favorite models: Serve and fine-tune -open-source foundation models, including MPT, Falcon, -and LLaMA. Use Scale-hosted models or deploy to your own infrastructure.

+

Ready-to-use APIs for your favorite models: Deploy and serve +open-source foundation models - including LLaMA, MPT and Falcon. +Use Scale-hosted models or deploy to your own infrastructure.

+

Fine-tune your favorite models: Fine-tune open-source foundation +models like LLaMA, MPT etc. with your own data for optimized performance.

Optimized Inference: LLM Engine provides inference APIs for streaming responses and dynamically batching inputs for higher throughput and lower latency.

diff --git a/search/search_index.json b/search/search_index.json index 1ed2056f6..9b5425a85 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":""},{"location":"#llm-engine","title":"LLM Engine","text":"

The open source engine for inference and fine-tuning of Large Language Models.

LLM Engine is the easiest way to customize and serve LLMs.

LLMs can be accessed via Scale's hosted version or by using the helm charts in this repository to run model inference and fine-tuning in your own infrastructure.

"},{"location":"#quick-install","title":"Quick Install","text":"
pip install scale-llm-engine\n
"},{"location":"#about","title":"About","text":"

Foundation models are emerging as the building blocks of AI. However, deploying these models to the cloud and fine-tuning them are expensive operations that require infrastructure and ML expertise. It is also difficult to maintain over time as new models are released and new techniques for both inference and fine-tuning are made available.

LLM Engine is a Python library, CLI, and Helm chart that provides everything you need to serve and fine-tune foundation models, whether you use Scale's hosted infrastructure or do it in your own cloud infrastructure using Kubernetes.

"},{"location":"#key-features","title":"Key Features","text":"

Ready-to-use APIs for your favorite models: Serve and fine-tune open-source foundation models, including MPT, Falcon, and LLaMA. Use Scale-hosted models or deploy to your own infrastructure.

Optimized Inference: LLM Engine provides inference APIs for streaming responses and dynamically batching inputs for higher throughput and lower latency.

Open-Source Integrations: Deploy any Hugging Face model with a single command.

Deploying from any docker image: Turn any Docker image into an auto-scaling deployment with simple APIs.

"},{"location":"#features-coming-soon","title":"Features Coming Soon","text":"

k8s Installation Documentation: We are working hard to document installation and maintenance of inference and fine-tuning functionality on your own infrastructure. For now, our documentation covers using our client libraries to access Scale's hosted infrastructure.

Fast Cold-Start Times: To prevent GPUs from idling, LLM Engine automatically scales your model to zero when it's not in use and scales up within seconds, even for large foundation models.

Cost Optimization: Deploy AI models at a lower cost than commercial alternatives, including cold-start and warm-down times.

"},{"location":"contributing/","title":"Contributing to LLM Engine","text":""},{"location":"contributing/#updating-llm-engine-documentation","title":"Updating LLM Engine Documentation","text":"

LLM Engine leverages mkdocs to create beautiful, community-oriented documentation.

"},{"location":"contributing/#step-1-clone-the-repository","title":"Step 1: Clone the Repository","text":"

Clone/Fork the LLM Engine Repository. Our documentation lives in the docs folder.

"},{"location":"contributing/#step-2-install-the-dependencies","title":"Step 2: Install the Dependencies","text":"

Dependencies are located in requirements-docs.txt; go ahead and pip install them with

pip install -r requirements-docs.txt\n
"},{"location":"contributing/#step-3-run-locally","title":"Step 3: Run Locally","text":"

To run the documentation service locally, execute the following command:

mkdocs serve\n

This should kick off a locally running instance on http://127.0.0.1:8000/.

As you edit the content in the docs folder, the site will be automatically reloaded on each file save.

"},{"location":"contributing/#step-4-editing-navigation-and-settings","title":"Step 4: Editing Navigation and Settings","text":"

If you are less familiar with mkdocs, note that in addition to the markdown content in the docs folder, there is a top-level mkdocs.yml file that defines the navigation pane and other website settings. If you don't see your page where you think it should be, double-check the .yml file.

"},{"location":"contributing/#step-5-building-and-deploying","title":"Step 5: Building and Deploying","text":"

CircleCI (via .circleci/config.yml) handles the building and deployment of our documentation service for us.

"},{"location":"faq/","title":"Frequently Asked Questions","text":""},{"location":"getting_started/","title":"Getting Started","text":"

The fastest way to get started with LLM Engine is to use the Python client in this repository to run inference and fine-tuning on Scale's infrastructure. This path does not require you to install anything on your infrastructure, and Scale's free tier gives you access to experimentation using open source LLMs.

To start with, install LLM Engine via pip or conda:

pipconda
pip install scale-llm-engine\n
conda install scale-llm-engine -c conda-forge\n
"},{"location":"getting_started/#scale-api-keys","title":"Scale API Keys","text":"

Next, you need a Scale Spellbook API key.

"},{"location":"getting_started/#retrieving-your-api-key","title":"Retrieving your API Key","text":"

To retrieve your API key, head to Scale Spellbook where you will get an API key on the settings page.

Different API Keys for different Scale Products

If you have leveraged Scale's platform for annotation work in the past, please note that your Spellbook API key will be different than the Scale Annotation API key. You will want to create a Spellbook API key before getting started.

"},{"location":"getting_started/#set-your-api-key","title":"Set your API Key","text":"

LLM Engine uses environment variables to access your API key.

Set this API key as the SCALE_API_KEY environment variable by running the following command in your terminal before you run your python application.

export SCALE_API_KEY=\"[Your API key]\"\n

You can also add the line above to your .zshrc or .bash_profile so it's automatically set for future sessions.
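If you prefer to set the key from within Python (for example, in a notebook), a minimal sketch is below. It assumes the client reads SCALE_API_KEY from the process environment at request time, matching the environment-variable behavior described above:

import os

# Assumption: the key is read from the environment when a request is made,
# so set it before issuing any requests.
os.environ["SCALE_API_KEY"] = "[Your API key]"

from llmengine import Completion  # import after the key is set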

"},{"location":"getting_started/#example-code","title":"Example Code","text":""},{"location":"getting_started/#sample-completion","title":"Sample Completion","text":"

With your API key set, you can now send LLM Engine requests using the Python client:

from llmengine import Completion\n\nresponse = Completion.create(\n    model=\"falcon-7b-instruct\",\n    prompt=\"I'm opening a pancake restaurant that specializes in unique pancake shapes, colors, and flavors. List 3 quirky names I could name my restaurant.\",\n    max_new_tokens=100,\n    temperature=0.2,\n)\n\nprint(response.outputs[0].text)\n
"},{"location":"getting_started/#with-streaming","title":"With Streaming","text":"
from llmengine import Completion\nimport sys\nstream = Completion.create(\n    model=\"falcon-7b-instruct\",\n    prompt=\"Give me a 200 word summary on the current economic events in the US.\",\n    max_new_tokens=1000,\n    temperature=0.2,\n    stream=True\n)\n\nfor response in stream:\n    if response.output:\n        print(response.output.text, end=\"\")\n        sys.stdout.flush()\n
"},{"location":"model_zoo/","title":"Public Model Zoo","text":"

Scale hosts the following models in a model zoo:

Model Name          | Inference APIs Available | Fine-tuning APIs Available
llama-7b            | \u2705 | \u2705
falcon-7b           | \u2705 |
falcon-7b-instruct  | \u2705 |
falcon-40b          | \u2705 |
falcon-40b-instruct | \u2705 |
mpt-7b              | \u2705 |
mpt-7b-instruct     | \u2705 | \u2705
flan-t5-xxl         | \u2705 |
"},{"location":"model_zoo/#usage","title":"Usage","text":"

Each of these models can be used with the Completion API.

The specified models can be fine-tuned with the FineTune API.

More information about the models can be found using the Model API.
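As a quick illustration of how the three APIs fit together, here is a hedged sketch; the model name comes from the table above, and the training-file URL mirrors the FineTune example later in these docs:

from llmengine import Completion, FineTune, Model

# Inference on a zoo model via the Completion API
response = Completion.create(model="llama-7b", prompt="Hello, my name is", max_new_tokens=10)

# Fine-tune a model that supports fine-tuning (illustrative URL)
job = FineTune.create(
    model="llama-7b",
    training_file="https://my-bucket.s3.us-west-2.amazonaws.com/path/to/training-file.csv",
)

# Look up model metadata via the Model API
info = Model.get("llama-7b")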

"},{"location":"api/data_types/","title":"\ud83d\udc0d Python Client Data Type Reference","text":""},{"location":"api/data_types/#llmengine.CompletionOutput","title":"CompletionOutput","text":"

Bases: BaseModel

Represents the output of a completion request to a model.

"},{"location":"api/data_types/#llmengine.data_types.CompletionOutput.text","title":"text instance-attribute","text":"
text: str\n

The text of the completion.

"},{"location":"api/data_types/#llmengine.data_types.CompletionOutput.num_completion_tokens","title":"num_completion_tokens instance-attribute","text":"
num_completion_tokens: int\n

Number of tokens in the completion.

"},{"location":"api/data_types/#llmengine.CompletionStreamOutput","title":"CompletionStreamOutput","text":"

Bases: BaseModel

"},{"location":"api/data_types/#llmengine.data_types.CompletionStreamOutput.text","title":"text instance-attribute","text":"
text: str\n

The text of the completion.

"},{"location":"api/data_types/#llmengine.data_types.CompletionStreamOutput.finished","title":"finished instance-attribute","text":"
finished: bool\n

Whether the completion is finished.

"},{"location":"api/data_types/#llmengine.data_types.CompletionStreamOutput.num_completion_tokens","title":"num_completion_tokens instance-attribute class-attribute","text":"
num_completion_tokens: Optional[int] = None\n

Number of tokens in the completion.

"},{"location":"api/data_types/#llmengine.CompletionSyncResponse","title":"CompletionSyncResponse","text":"

Bases: BaseModel

Response object for a synchronous prompt completion.

"},{"location":"api/data_types/#llmengine.data_types.CompletionSyncResponse.output","title":"output instance-attribute","text":"
output: CompletionOutput\n

Completion output.

"},{"location":"api/data_types/#llmengine.data_types.CompletionSyncResponse.request_id","title":"request_id instance-attribute","text":"
request_id: str\n

Unique ID of request.

"},{"location":"api/data_types/#llmengine.CompletionStreamResponse","title":"CompletionStreamResponse","text":"

Bases: BaseModel

Response object for a stream prompt completion task.

"},{"location":"api/data_types/#llmengine.data_types.CompletionStreamResponse.output","title":"output instance-attribute class-attribute","text":"
output: Optional[CompletionStreamOutput] = None\n

Completion output.

"},{"location":"api/data_types/#llmengine.data_types.CompletionStreamResponse.request_id","title":"request_id instance-attribute","text":"
request_id: str\n

Unique ID of request.

"},{"location":"api/data_types/#llmengine.CreateFineTuneRequest","title":"CreateFineTuneRequest","text":"

Bases: BaseModel

Request object for creating a FineTune.

"},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneRequest.model","title":"model instance-attribute class-attribute","text":"
model: str = Field(Ellipsis, description='Identifier of base model to train from.')\n

Identifier of base model to train from.

"},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneRequest.training_file","title":"training_file instance-attribute class-attribute","text":"
training_file: str = Field(Ellipsis, description=\"Path to file of training dataset. Dataset must be a csv with columns 'prompt' and 'response'.\")\n

Path to file of training dataset. Dataset must be a csv with columns 'prompt' and 'response'.

"},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneRequest.validation_file","title":"validation_file instance-attribute class-attribute","text":"
validation_file: Optional[str] = Field(default=None, description='Path to file of validation dataset. Has the same format as training_file. If not provided, we will generate a split from the training dataset.')\n

Path to file of validation dataset. Has the same format as training_file. If not provided, we will generate a split from the training dataset.

"},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneRequest.hyperparameters","title":"hyperparameters instance-attribute class-attribute","text":"
hyperparameters: Optional[Dict[str, Any]] = Field(default=None, description='Hyperparameters to pass in to training job.')\n

Hyperparameters to pass in to training job.

"},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneRequest.suffix","title":"suffix instance-attribute class-attribute","text":"
suffix: Optional[str] = Field(default=None, description='Optional user-provided identifier suffix for the fine-tuned model.')\n

Optional user-provided identifier suffix for the fine-tuned model.

"},{"location":"api/data_types/#llmengine.CreateFineTuneResponse","title":"CreateFineTuneResponse","text":"

Bases: BaseModel

Response object for creating a FineTune.

"},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneResponse.fine_tune_id","title":"fine_tune_id instance-attribute class-attribute","text":"
fine_tune_id: str = Field(Ellipsis, description='ID of the created fine-tuning job.')\n

The ID of the FineTune.

"},{"location":"api/data_types/#llmengine.GetFineTuneResponse","title":"GetFineTuneResponse","text":"

Bases: BaseModel

Response object for retrieving a FineTune.

"},{"location":"api/data_types/#llmengine.data_types.GetFineTuneResponse.fine_tune_id","title":"fine_tune_id instance-attribute class-attribute","text":"
fine_tune_id: str = Field(Ellipsis, description='ID of the requested job.')\n

The ID of the FineTune.

"},{"location":"api/data_types/#llmengine.data_types.GetFineTuneResponse.status","title":"status instance-attribute class-attribute","text":"
status: BatchJobStatus = Field(Ellipsis, description='Status of the requested job.')\n

The status of the FineTune job.

"},{"location":"api/data_types/#llmengine.ListFineTunesResponse","title":"ListFineTunesResponse","text":"

Bases: BaseModel

Response object for listing FineTunes.

"},{"location":"api/data_types/#llmengine.data_types.ListFineTunesResponse.jobs","title":"jobs instance-attribute class-attribute","text":"
jobs: List[GetFineTuneResponse] = Field(Ellipsis, description='List of fine-tuning jobs and their statuses.')\n

A list of FineTunes, represented as GetFineTuneResponses.

"},{"location":"api/data_types/#llmengine.CancelFineTuneResponse","title":"CancelFineTuneResponse","text":"

Bases: BaseModel

Response object for cancelling a FineTune.

"},{"location":"api/data_types/#llmengine.data_types.CancelFineTuneResponse.success","title":"success instance-attribute class-attribute","text":"
success: bool = Field(Ellipsis, description='Whether cancellation was successful.')\n

Whether the cancellation succeeded.

"},{"location":"api/data_types/#llmengine.GetLLMEndpointResponse","title":"GetLLMEndpointResponse","text":"

Bases: BaseModel

Response object for retrieving a Model.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.name","title":"name instance-attribute class-attribute","text":"
name: str = Field(description='The name of the model. Use this for making inference requests to the model.')\n

The name of the model. Use this for making inference requests to the model.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.source","title":"source instance-attribute class-attribute","text":"
source: LLMSource = Field(description='The source of the model, e.g. Hugging Face.')\n

The source of the model, e.g. Hugging Face.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.inference_framework","title":"inference_framework instance-attribute class-attribute","text":"
inference_framework: LLMInferenceFramework = Field(description='The inference framework used by the model.')\n

The inference framework used by the model.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.id","title":"id instance-attribute class-attribute","text":"
id: Optional[str] = Field(default=None, description='(For self-hosted users) The autogenerated ID of the model.')\n

(For self-hosted users) The autogenerated ID of the model.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.model_name","title":"model_name instance-attribute class-attribute","text":"
model_name: Optional[str] = Field(default=None, description='(For self-hosted users) For fine-tuned models, the base model. For base models, this will be the same as `name`.')\n

(For self-hosted users) For fine-tuned models, the base model. For base models, this will be the same as name.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.inference_framework_tag","title":"inference_framework_tag instance-attribute class-attribute","text":"
inference_framework_tag: Optional[str] = Field(default=None, description='(For self-hosted users) The Docker image tag used to run the model.')\n

(For self-hosted users) The Docker image tag used to run the model.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.num_shards","title":"num_shards instance-attribute class-attribute","text":"
num_shards: Optional[int] = Field(default=None, description='(For self-hosted users) The number of shards.')\n

(For self-hosted users) The number of shards.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.quantize","title":"quantize instance-attribute class-attribute","text":"
quantize: Optional[Quantization] = Field(default=None, description='(For self-hosted users) The quantization method.')\n

(For self-hosted users) The quantization method.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.spec","title":"spec instance-attribute class-attribute","text":"
spec: Optional[GetModelEndpointResponse] = Field(default=None, description='(For self-hosted users) Model endpoint details.')\n

(For self-hosted users) Model endpoint details.

"},{"location":"api/data_types/#llmengine.ListLLMEndpointsResponse","title":"ListLLMEndpointsResponse","text":"

Bases: BaseModel

Response object for listing Models.

"},{"location":"api/data_types/#llmengine.data_types.ListLLMEndpointsResponse.model_endpoints","title":"model_endpoints instance-attribute class-attribute","text":"
model_endpoints: List[GetLLMEndpointResponse] = Field(Ellipsis, description='The list of models.')\n

A list of Models, represented as GetLLMEndpointResponses.

"},{"location":"api/data_types/#llmengine.DeleteLLMEndpointResponse","title":"DeleteLLMEndpointResponse","text":"

Bases: BaseModel

Response object for deleting a Model.

"},{"location":"api/data_types/#llmengine.data_types.DeleteLLMEndpointResponse.deleted","title":"deleted instance-attribute class-attribute","text":"
deleted: bool = Field(Ellipsis, description='Whether deletion was successful.')\n

Whether the deletion succeeded.

"},{"location":"api/error_handling/","title":"Error handling","text":"

LLM Engine uses conventional HTTP response codes to indicate the success or failure of an API request. In general: codes in the 2xx range indicate success. Codes in the 4xx range indicate a request that failed given the information provided (e.g. a given Model was not found, or an invalid temperature was specified). Codes in the 5xx range indicate an error with the LLM Engine servers.

In the Python client, errors are presented via a set of corresponding Exception classes, which should be caught and handled by the user accordingly.
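For example, a caller might handle these exceptions per error class. This is a minimal sketch; it assumes the classes are importable from llmengine.errors, as the section anchors below suggest:

from llmengine import Completion
from llmengine.errors import BadRequestError, RateLimitExceededError, ServerError

try:
    response = Completion.create(model="llama-7b", prompt="Hello", max_new_tokens=10)
except BadRequestError as e:
    # HTTP 400: fix the inputs; do not retry unchanged.
    print(f"Invalid request: {e}")
except RateLimitExceededError:
    # HTTP 429: back off and retry (see RateLimitExceededError below).
    print("Rate limited; retry with backoff.")
except ServerError as e:
    # HTTP 5xx: server-side problem.
    print(f"Server error: {e}")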

"},{"location":"api/error_handling/#llmengine.errors.BadRequestError","title":"BadRequestError","text":"
BadRequestError(message: str)\n

Bases: Exception

Corresponds to HTTP 400. Indicates that the request had inputs that were invalid. The user should not attempt to retry the request without changing the inputs.

"},{"location":"api/error_handling/#llmengine.errors.UnauthorizedError","title":"UnauthorizedError","text":"
UnauthorizedError(message: str)\n

Bases: Exception

Corresponds to HTTP 401. This means that no valid API key was provided.

"},{"location":"api/error_handling/#llmengine.errors.NotFoundError","title":"NotFoundError","text":"
NotFoundError(message: str)\n

Bases: Exception

Corresponds to HTTP 404. This means that the resource (e.g. a Model, FineTune, etc.) could not be found. Note that this can also be returned in some cases where the object might exist, but the user does not have access to the object. This is done to avoid leaking information about the existence or nonexistence of said object that the user does not have access to.

"},{"location":"api/error_handling/#llmengine.errors.RateLimitExceededError","title":"RateLimitExceededError","text":"
RateLimitExceededError(message: str)\n

Bases: Exception

Corresponds to HTTP 429. Too many requests hit the API too quickly. We recommend an exponential backoff for retries.
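A minimal retry sketch with exponential backoff, assuming the import path implied by the section anchor:

import time
from llmengine import Completion
from llmengine.errors import RateLimitExceededError  # assumed import path

def create_with_backoff(max_retries: int = 5, **kwargs):
    # Retry Completion.create, sleeping 1s, 2s, 4s, ... between attempts.
    for attempt in range(max_retries):
        try:
            return Completion.create(**kwargs)
        except RateLimitExceededError:
            time.sleep(2 ** attempt)
    raise RuntimeError("Still rate limited after retries")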

"},{"location":"api/error_handling/#llmengine.errors.ServerError","title":"ServerError","text":"
ServerError(status_code: int, message: str)\n

Bases: Exception

Corresponds to HTTP 5xx errors on the server.

"},{"location":"api/langchain/","title":"\ud83e\udd9c Langchain","text":"

Coming soon!

"},{"location":"api/python_client/","title":"\ud83d\udc0d Python Client API Reference","text":""},{"location":"api/python_client/#llmengine.Completion","title":"Completion","text":"

Bases: APIEngine

Completion API. This API is used to generate text completions.

Language models are trained to understand natural language and predict text outputs as a response to their inputs. The inputs are called prompts and the outputs are referred to as completions. LLMs take the input prompts and chunk them into smaller units called tokens to process and generate language. Tokens may include trailing spaces and even sub-words; this process is language dependent.

The Completion API can be run either synchronous or asynchronously (via Python asyncio). For each of these modes, you can also choose whether to stream token responses or not.

"},{"location":"api/python_client/#llmengine.completion.Completion.create","title":"create classmethod","text":"
create(model: str, prompt: str, max_new_tokens: int = 20, temperature: float = 0.2, timeout: int = 10, stream: bool = False) -> Union[CompletionSyncResponse, Iterator[CompletionStreamResponse]]\n

Creates a completion for the provided prompt and parameters synchronously.

This API can be used to get the LLM to generate a completion synchronously. It takes as parameters the model (see Model Zoo) and the prompt. Optionally it takes max_new_tokens, temperature, timeout and stream. It returns a CompletionSyncV1Response if stream=False or an iterator of CompletionStreamV1Response with request_id and outputs fields.

Parameters:

Name Type Description Default model str

Name of the model to use. See Model Zoo for a list of Models that are supported.

required prompt str

The prompt to generate completions for, encoded as a string.

required max_new_tokens int

The maximum number of tokens to generate in the completion.

The token count of your prompt plus max_new_tokens cannot exceed the model's context length. See Model Zoo for information on each supported model's context length.

20 temperature float

What sampling temperature to use, in the range (0, 1]. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

0.2 timeout int

Timeout in seconds. This is the maximum amount of time you are willing to wait for a response.

10 stream bool

Whether to stream the response. If true, the return type is an Iterator[CompletionStreamResponse]. Otherwise, the return type is a CompletionSyncResponse. When streaming, tokens will be sent as data-only server-sent events.

False

Returns:

Name Type Description response Union[CompletionSyncResponse, Iterator[CompletionStreamResponse]]

The generated response (if stream=False) or iterator of response chunks (if stream=True)

Token streaming can be used to reduce perceived latency for applications:

Synchronous completion without token streaming in pythonResponse in json
from llmengine import Completion\n\nresponse = Completion.create(\n    model=\"llama-7b\",\n    prompt=\"Hello, my name is\",\n    max_new_tokens=10,\n    temperature=0.2,\n)\nprint(response.json())\n
{\n    \"request_id\": \"0123456789\",\n    \"outputs\":\n    [\n        {\n            \"text\": \"_______ and I am a _______\",\n            \"num_completion_tokens\": 10\n}\n    ],\n    \"traceback\": null\n}\n

Here is how applications can use streaming:

Synchronous completion with token streaming in pythonResponse in json
from llmengine import Completion\n\nstream = Completion.create(\n    model=\"llama-7b\",\n    prompt=\"why is the sky blue?\",\n    max_new_tokens=5,\n    temperature=0.2,\n    stream=True,\n)\n\nfor response in stream:\n    if response.output:\n        print(response.json())\n
{\"request_id\": \"0123456789\", \"output\": {\"text\": \"\\n\", \"finished\": false, \"num_completion_tokens\": 1 } }\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \"I\", \"finished\": false, \"num_completion_tokens\": 2 } }\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \" don\", \"finished\": false, \"num_completion_tokens\": 3 } }\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \"\u2019\", \"finished\": false, \"num_completion_tokens\": 4 } }\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \"t\", \"finished\": true, \"num_completion_tokens\": 5 } }\n
"},{"location":"api/python_client/#llmengine.completion.Completion.acreate","title":"acreate async classmethod","text":"
acreate(model: str, prompt: str, max_new_tokens: int = 20, temperature: float = 0.2, timeout: int = 10, stream: bool = False) -> Union[CompletionSyncResponse, AsyncIterable[CompletionStreamResponse]]\n

Creates a completion for the provided prompt and parameters asynchronously (with asyncio).

This API can be used to get the LLM to generate a completion asynchronously. It takes as parameters the model (see Model Zoo) and the prompt. Optionally it takes max_new_tokens, temperature, timeout and stream. It returns a CompletionSyncV1Response if stream=False or an async iterator of CompletionStreamV1Response with request_id and outputs fields.

Parameters:

Name Type Description Default model str

Name of the model to use. See Model Zoo for a list of Models that are supported.

required prompt str

The prompt to generate completions for, encoded as a string.

required max_new_tokens int

The maximum number of tokens to generate in the completion.

The token count of your prompt plus max_new_tokens cannot exceed the model's context length. See Model Zoo for information on each supported model's context length.

20 temperature float

What sampling temperature to use, in the range (0, 1]. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

0.2 timeout int

Timeout in seconds. This is the maximum amount of time you are willing to wait for a response.

10 stream bool

Whether to stream the response. If true, the return type is an AsyncIterable[CompletionStreamResponse]. Otherwise, the return type is a CompletionSyncResponse. When streaming, tokens will be sent as data-only server-sent events.

False

Returns:

Name Type Description response Union[CompletionSyncResponse, AsyncIterable[CompletionStreamResponse]]

The generated response (if stream=False) or iterator of response chunks (if stream=True)

Token streaming can be used to reduce perceived latency for applications:

Asynchronous completion without token streaming in pythonResponse in json
import asyncio\nfrom llmengine import Completion\n\nasync def main():\n    response = await Completion.acreate(\n        model=\"llama-7b\",\n        prompt=\"Hello, my name is\",\n        max_new_tokens=10,\n        temperature=0.2,\n    )\n    print(response.json())\n\nasyncio.run(main())\n
{\n    \"request_id\": \"b1b2c3d4e5f6g7h8i9j0\",\n    \"outputs\":\n    [\n        {\n            \"text\": \"_______, and I am a _____\",\n            \"num_completion_tokens\": 10\n}\n    ],\n}\n

Here is how applications can use streaming:

Asynchronous completion with token streaming in pythonResponse in json
import asyncio\nfrom llmengine import Completion\n\nasync def main():\n    stream = await Completion.acreate(\n        model=\"llama-7b\",\n        prompt=\"why is the sky blue?\",\n        max_new_tokens=5,\n        temperature=0.2,\n        stream=True,\n    )\n\n    async for response in stream:\n        if response.output:\n            print(response.json())\n\nasyncio.run(main())\n
{\"request_id\": \"0123456789\", \"output\": {\"text\": \"\\n\", \"finished\": false, \"num_completion_tokens\": 1}}\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \"I\", \"finished\": false, \"num_completion_tokens\": 2}}\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \" think\", \"finished\": false, \"num_completion_tokens\": 3}}\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \" the\", \"finished\": false, \"num_completion_tokens\": 4}}\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \" sky\", \"finished\": true, \"num_completion_tokens\": 5}}\n
"},{"location":"api/python_client/#llmengine.FineTune","title":"FineTune","text":"

Bases: APIEngine

FineTune API. This API is used to fine-tune models.

Fine-tuning is a process where the LLM is further trained on a task-specific dataset, allowing the model to adjust its parameters to better align with the task at hand. Fine-tuning is a supervised training phase, where prompt/response pairs are provided to optimize the performance of the LLM.

LLM Engine provides APIs to create fine-tunes on a base model with training & validation datasets. APIs are also provided to list, cancel and retrieve fine-tuning jobs.

Creating a fine-tune will end with the creation of a Model, which you can view using Model.get(model_name) or delete using Model.delete(model_name).
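Put together, a minimal lifecycle sketch might look like the following; it assumes the status can be compared as the documented strings (PENDING, STARTED, FAILURE, SUCCESS), which may need adjusting to the actual BatchJobStatus type, and the polling interval is arbitrary:

import time
from llmengine import FineTune, Model

job = FineTune.create(
    model="llama-7b",
    training_file="https://my-bucket.s3.us-west-2.amazonaws.com/path/to/training-file.csv",
)

# Poll until the job reaches a terminal state (see FineTune.get below).
while True:
    status = FineTune.get(fine_tune_id=job.fine_tune_id).status
    if str(status) in ("SUCCESS", "FAILURE"):  # string comparison is an assumption
        break
    time.sleep(60)  # arbitrary polling interval

# On success, the resulting Model can be viewed with Model.get(model_name)
# or removed with Model.delete(model_name).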

"},{"location":"api/python_client/#llmengine.fine_tuning.FineTune.create","title":"create classmethod","text":"
create(model: str, training_file: str, validation_file: Optional[str] = None, hyperparameters: Optional[Dict[str, str]] = None, suffix: Optional[str] = None) -> CreateFineTuneResponse\n

Creates a job that fine-tunes a specified model from a given dataset.

This API can be used to fine-tune a model. The model is the name of the base model (see Model Zoo for available models) to fine-tune. The training file should consist of prompt and response pairs. Your data must be formatted as a CSV file that includes two columns: prompt and response. A maximum of 100,000 rows of data is currently supported. At least 200 rows of data are recommended to start seeing benefits from fine-tuning.

Parameters:

Name Type Description Default model `str`

The name of the base model to fine-tune. See Model Zoo for the list of available models to fine-tune.

required training_file `str`

Publicly accessible URL to a CSV file for training.

required validation_file `Optional[str]`

Publicly accessible URL to a CSV file for validation.

None hyperparameters `Optional[Dict[str, str]]`

A dict of hyperparameters to customize fine-tuning behavior.

Currently supported hyperparameters:

  • lr: Peak learning rate used during fine-tuning. It decays with a cosine schedule afterward. (Default: 2e-5)
  • warmup_ratio: Ratio of training steps used for learning rate warmup. (Default: 0.03)
  • epochs: Number of fine-tuning epochs. (Default: 5)
  • weight_decay: Regularization penalty applied to learned weights. (Default: 0.001)
  • max_seq_length: Maximum number of tokens per sequence in the dataset. (Default: 1024)
None suffix `Optional[str]`

A string that will be added to your fine-tuned model name.

None

Returns:

Name Type Description CreateFineTuneResponse CreateFineTuneResponse

an object that contains the ID of the created fine-tuning job

Here is an example script to create a 5-row CSV of properly formatted data for fine-tuning an airline question answering bot:

Formatting data in python
import csv\n# Define data\ndata = [\n  (\"What is your policy on carry-on luggage?\", \"Our policy allows each passenger to bring one piece of carry-on luggage and one personal item such as a purse or briefcase. The maximum size for carry-on luggage is 22 x 14 x 9 inches.\"),\n  (\"How can I change my flight?\", \"You can change your flight through our website or mobile app. Go to 'Manage my booking' section, enter your booking reference and last name, then follow the prompts to change your flight.\"),\n  (\"What meals are available on my flight?\", \"We offer a variety of meals depending on the flight's duration and route. These can range from snacks and light refreshments to full-course meals on long-haul flights. Specific meal options can be viewed during the booking process.\"),\n  (\"How early should I arrive at the airport before my flight?\", \"We recommend arriving at least two hours before domestic flights and three hours before international flights.\"),\n  (\"Can I select my seat in advance?\", \"Yes, you can select your seat during the booking process or afterwards via the 'Manage my booking' section on our website or mobile app.\"),\n  ]\n\n# Write data to a CSV file\nwith open('customer_service_data.csv', 'w', newline='') as file:\n    writer = csv.writer(file)\n    writer.writerow([\"prompt\", \"response\"])\n    writer.writerows(data)\n

Example code for fine-tuning:

Fine-tuning in pythonResponse in json
from llmengine import FineTune\n\nresponse = FineTune.create(\n    model=\"llama-7b\",\n    training_file=\"https://my-bucket.s3.us-west-2.amazonaws.com/path/to/training-file.csv\",\n)\n\nprint(response.json())\n
{\n    \"fine_tune_id\": \"ft_abc123\"\n}\n
"},{"location":"api/python_client/#llmengine.fine_tuning.FineTune.get","title":"get classmethod","text":"
get(fine_tune_id: str) -> GetFineTuneResponse\n

Get status of a fine-tuning job.

This API can be used to get the status of an already running fine-tuning job. It takes as a single parameter the fine_tune_id and returns a GetFineTuneResponse object with the id and status (PENDING, STARTED, UNDEFINED, FAILURE or SUCCESS).

Parameters:

Name Type Description Default fine_tune_id `str`

ID of the fine-tuning job

required

Returns:

Name Type Description GetFineTuneResponse GetFineTuneResponse

an object that contains the ID and status of the requested job

Getting status of fine-tuning in pythonResponse in json
from llmengine import FineTune\n\nresponse = FineTune.get(\n    fine_tune_id=\"ft_abc123\",\n)\n\nprint(response.json())\n
{\n    \"fine_tune_id\": \"ft_abc123\",\n    \"status\": \"STARTED\"\n}\n
"},{"location":"api/python_client/#llmengine.fine_tuning.FineTune.list","title":"list classmethod","text":"
list() -> ListFineTunesResponse\n

List fine-tuning jobs.

This API can be used to list all the fine-tuning jobs. It returns a list of pairs of fine_tune_id and status for all existing jobs.

Returns:

Name Type Description ListFineTunesResponse ListFineTunesResponse

an object that contains a list of all fine-tuning jobs and their statuses

Listing fine-tuning jobs in pythonResponse in json
from llmengine import FineTune\n\nresponse = FineTune.list()\nprint(response.json())\n
{\n    \"jobs\": [\n        {\n            \"fine_tune_id\": \"ft_abc123\",\n            \"status\": \"STARTED\"\n},\n        {\n            \"fine_tune_id\": \"ft_def456\",\n            \"status\": \"SUCCESS\"\n}\n    ]\n}\n
"},{"location":"api/python_client/#llmengine.fine_tuning.FineTune.cancel","title":"cancel classmethod","text":"
cancel(fine_tune_id: str) -> CancelFineTuneResponse\n

Cancel a fine-tuning job.

This API can be used to cancel an existing fine-tuning job if it's no longer required. It takes as parameter the fine_tune_id and returns a response object which has a success field confirming if the cancellation was successful.

Parameters:

Name Type Description Default fine_tune_id `str`

ID of the fine-tuning job

required

Returns:

Name Type Description CancelFineTuneResponse CancelFineTuneResponse

an object that contains whether the cancellation was successful

Cancelling fine-tuning job in pythonResponse in json
from llmengine import FineTune\n\nresponse = FineTune.cancel(fine_tune_id=\"ft_abc123\")\nprint(response.json())\n
{\n    \"success\": true\n}\n
"},{"location":"api/python_client/#llmengine.Model","title":"Model","text":"

Bases: APIEngine

Model API. This API is used to get, list, and delete models. Models include both base models built into LLM Engine, and fine-tuned models that you create through the FineTune.create() API.

See Model Zoo for the list of publicly available base models.

"},{"location":"api/python_client/#llmengine.model.Model.get","title":"get classmethod","text":"
get(model: str) -> GetLLMEndpointResponse\n

Get information about an LLM model.

This API can be used to get information about a Model's source and inference framework. For self-hosted users, it returns additional information about number of shards, quantization, infra settings, etc. The function takes the model name as its single parameter and returns a GetLLMEndpointResponse object.

Parameters:

Name Type Description Default model `str`

Name of the model

required

Returns:

Name Type Description GetLLMEndpointResponse GetLLMEndpointResponse

object representing the LLM and configurations

Accessing model in pythonResponse in json
from llmengine import Model\n\nresponse = Model.get(\"llama-7b.suffix.2023-07-18-12-00-00\")\n\nprint(response.json())\n
{\n    \"id\": null,\n    \"name\": \"llama-7b.suffix.2023-07-18-12-00-00\",\n    \"model_name\": null,\n    \"source\": \"hugging_face\",\n    \"inference_framework\": \"text_generation_inference\",\n    \"inference_framework_tag\": null,\n    \"num_shards\": null,\n    \"quantize\": null,\n    \"spec\": null\n}\n
"},{"location":"api/python_client/#llmengine.model.Model.list","title":"list classmethod","text":"
list() -> ListLLMEndpointsResponse\n

List LLM models available to call inference on.

This API can be used to list all available models, including both publicly available models and user-created fine-tuned models. It returns a list of GetLLMEndpointResponse objects for all models. The most important field is the model name.

Returns:

Name Type Description ListLLMEndpointsResponse ListLLMEndpointsResponse

list of models

Listing available models in pythonResponse in json
from llmengine import Model\n\nresponse = Model.list()\nprint(response.json())\n
{\n    \"model_endpoints\": [\n        {\n            \"id\": null,\n            \"name\": \"llama-7b.suffix.2023-07-18-12-00-00\",\n            \"model_name\": null,\n            \"source\": \"hugging_face\",\n            \"inference_framework\": \"text_generation_inference\",\n            \"inference_framework_tag\": null,\n            \"num_shards\": null,\n            \"quantize\": null,\n            \"spec\": null\n},\n        {\n            \"id\": null,\n            \"name\": \"llama-7b\",\n            \"model_name\": null,\n            \"source\": \"hugging_face\",\n            \"inference_framework\": \"text_generation_inference\",\n            \"inference_framework_tag\": null,\n            \"num_shards\": null,\n            \"quantize\": null,\n            \"spec\": null\n},\n        {\n            \"id\": null,\n            \"name\": \"llama-13b-deepspeed-sync\",\n            \"model_name\": null,\n            \"source\": \"hugging_face\",\n            \"inference_framework\": \"deepspeed\",\n            \"inference_framework_tag\": null,\n            \"num_shards\": null,\n            \"quantize\": null,\n            \"spec\": null\n},\n        {\n            \"id\": null,\n            \"name\": \"falcon-40b\",\n            \"model_name\": null,\n            \"source\": \"hugging_face\",\n            \"inference_framework\": \"text_generation_inference\",\n            \"inference_framework_tag\": null,\n            \"num_shards\": null,\n            \"quantize\": null,\n            \"spec\": null\n}\n    ]\n}\n
"},{"location":"api/python_client/#llmengine.model.Model.delete","title":"delete classmethod","text":"
delete(model: str) -> DeleteLLMEndpointResponse\n

Deletes an LLM model.

This API can be used to delete a fine-tuned model. It takes as a parameter the name of the model and returns a response object with a deleted field confirming whether the deletion was successful. If called on a base model included with LLM Engine, an error will be thrown.

Parameters:

Name Type Description Default model `str`

Name of the model

required

Returns:

Name Type Description response DeleteLLMEndpointResponse

whether the model was successfully deleted

Deleting model in python / Response in json
from llmengine import Model\n\nresponse = Model.delete(\"llama-7b.suffix.2023-07-18-12-00-00\")\nprint(response.json())\n
{\n    \"deleted\": true\n}\n
"},{"location":"guides/completions/","title":"Completions","text":"

Language Models are trained to understand natural language and provide text outputs as a response to their inputs. The inputs are called prompts and the outputs are referred to as completions. LLMs take the input prompts and chunk them into smaller units called tokens to process and generate language. Tokens may include trailing spaces and even sub-words; this process is language dependent.

Scale LLM Engine provides access to open source language models (see Model Zoo) that can be used for producing completions to prompts.

"},{"location":"guides/completions/#completion-api-call","title":"Completion API call","text":"

An example API call looks as follows:

from llmengine import Completion\n\nresponse = Completion.create(\n    model=\"llama-7b\",\n    prompt=\"Hello, my name is\",\n    max_new_tokens=10,\n    temperature=0.2,\n)\n

The model is the LLM to be used (see Model Zoo). The prompt is the main input for the LLM to respond to. The max_new_tokens parameter is the maximum number of tokens to generate in the chat completion. The temperature is the sampling temperature to use. Higher values make the output more random, while lower values make it more focused and deterministic.

See the full API reference documentation to learn more.

"},{"location":"guides/completions/#completion-api-response","title":"Completion API response","text":"

An example Completion API response looks as follows:

Response in json / Response in python
{\n  \"outputs\": [\n    {\n      \"text\": \"_______ and I am a _______\",\n      \"num_completion_tokens\": 10\n}\n  ]\n}\n
>>> print(response.outputs[0].text)\n________. Hello, my name is _\n
"},{"location":"guides/completions/#token-streaming","title":"Token streaming","text":"

The Completions API supports token streaming to reduce perceived latency for certain applications. When streaming, tokens will be sent as data-only server-sent events.

To enable token streaming, pass stream=True to either Completion.create or Completion.acreate.

An example of token streaming using the synchronous Completions API looks as follows:

Token streaming with synchronous API in python
from llmengine import Completion\n\nstream = Completion.create(\n    model=\"llama-7b\",\n    prompt=\"why is the sky blue?\",\n    max_new_tokens=5,\n    temperature=0.2,\n    stream=True,\n)\n\nfor response in stream:\n    if response.output:\n        print(response.json())\n
"},{"location":"guides/completions/#async-requests","title":"Async requests","text":"

The Python client supports asyncio for creating Completions. Use Completion.acreate instead of Completion.create to utilize async processing. The function signatures are otherwise identical.

An example of async Completions looks as follows:

Completions with asynchronous API in python
import asyncio\nfrom llmengine import Completion\n\nasync def main():\n    response = await Completion.acreate(\n        model=\"llama-7b\",\n        prompt=\"Hello, my name is\",\n        max_new_tokens=10,\n        temperature=0.2,\n    )\n    print(response.json())\n\nasyncio.run(main())\n
"},{"location":"guides/completions/#which-model-should-i-use","title":"Which model should I use?","text":"

See the Model Zoo for more information on best practices for which model to use for Completions.

"},{"location":"guides/fine_tuning/","title":"Fine-tuning","text":"

Learn how to customize your models on your data with fine-tuning.

"},{"location":"guides/fine_tuning/#introduction","title":"Introduction","text":"

Fine-tuning helps improve model performance by training on specific examples of prompts and desired responses. LLMs are initially trained on data collected from the entire internet. With fine-tuning, LLMs can be optimized to perform better in a specific domain by learning from examples for that domain. Smaller LLMs that have been fine-tuned on a specific use case often outperform larger ones that were trained more generally.

Fine-tuning allows for:

  1. Higher quality results than prompt engineering alone
  2. Cost savings through shorter prompts
  3. The ability to reach equivalent accuracy with a smaller model
  4. Lower latency at inference time
  5. The chance to show an LLM more examples than can fit in a single context window

LLM Engine's fine-tuning API lets you fine-tune various open source LLMs on your own data and then make inference calls to the resulting LLM. For more specific details, see the fine-tuning API reference.

"},{"location":"guides/fine_tuning/#producing-high-quality-data-for-fine-tuning","title":"Producing high quality data for fine-tuning","text":"

The training data for fine-tuning should consist of prompt and response pairs.

As a rule of thumb, you should expect to see linear improvements in your fine-tuned model's quality with each doubling of the dataset size. Having high-quality data is also essential to improving performance. For every linear increase in the error rate in your training data, you may encounter a roughly quadratic increase in your fine-tuned model's error rate.

High-quality data is critical to improving model performance, and in several cases will require experts to generate and prepare it; the breadth and diversity of the data are also highly important. Scale's Data Engine can help prepare such high-quality, diverse datasets - more information here.

"},{"location":"guides/fine_tuning/#preparing-data","title":"Preparing data","text":"

Your data must be formatted as a CSV file that includes two columns: prompt and response. A maximum of 100,000 rows of data is currently supported. At least 200 rows of data are recommended to start seeing benefits from fine-tuning.

Here is an example script to create a 50-row CSV of properly formatted data for fine-tuning an airline question answering bot:

Creating a sample dataset
import csv\n# Define data\ndata = [\n    (\"What is your policy on carry-on luggage?\", \"Our policy allows each passenger to bring one piece of carry-on luggage and one personal item such as a purse or briefcase. The maximum size for carry-on luggage is 22 x 14 x 9 inches.\"),\n    (\"How can I change my flight?\", \"You can change your flight through our website or mobile app. Go to 'Manage my booking' section, enter your booking reference and last name, then follow the prompts to change your flight.\"),\n    (\"What meals are available on my flight?\", \"We offer a variety of meals depending on the flight's duration and route. These can range from snacks and light refreshments to full-course meals on long-haul flights. Specific meal options can be viewed during the booking process.\"),\n    (\"How early should I arrive at the airport before my flight?\", \"We recommend arriving at least two hours before domestic flights and three hours before international flights.\"),\n    (\"Can I select my seat in advance?\", \"Yes, you can select your seat during the booking process or afterwards via the 'Manage my booking' section on our website or mobile app.\"),\n    (\"What should I do if my luggage is lost?\", \"If your luggage is lost, please report this immediately at our 'Lost and Found' counter at the airport. We will assist you in tracking your luggage.\"),\n    (\"Do you offer special assistance for passengers with disabilities?\", \"Yes, we offer special assistance for passengers with disabilities. Please notify us of your needs at least 48 hours prior to your flight.\"),\n    (\"Can I bring my pet on the flight?\", \"Yes, we allow small pets in the cabin, and larger pets in the cargo hold. Please check our pet policy for more details.\"),\n    (\"What is your policy on flight cancellations?\", \"In case of flight cancellations, we aim to notify passengers as early as possible and offer either a refund or a rebooking on the next available flight.\"),\n    (\"Can I get a refund if I cancel my flight?\", \"Refunds depend on the type of ticket purchased. Please check our cancellation policy for details. Non-refundable tickets, however, are typically not eligible for refunds unless due to extraordinary circumstances.\"),\n    (\"How can I check-in for my flight?\", \"You can check-in for your flight either online, through our mobile app, or at the airport. Online and mobile app check-in opens 24 hours before departure and closes 90 minutes before.\"),\n    (\"Do you offer free meals on your flights?\", \"Yes, we serve free meals on all long-haul flights. For short-haul flights, we offer a complimentary drink and snack. Special meal requests should be made at least 48 hours before departure.\"),\n    (\"Can I use my electronic devices during the flight?\", \"Small electronic devices can be used throughout the flight in flight mode. Larger devices like laptops may be used above 10,000 feet.\"),\n    (\"How much baggage can I check-in?\", \"The checked baggage allowance depends on the class of travel and route. The details would be mentioned on your ticket, or you can check on our website.\"),\n    (\"How can I request for a wheelchair?\", \"To request a wheelchair or any other special assistance, please call our customer service at least 48 hours before your flight.\"),\n    (\"Do I get a discount for group bookings?\", \"Yes, we offer discounts on group bookings of 10 or more passengers. 
Please contact our group bookings team for more information.\"),\n    (\"Do you offer Wi-fi on your flights?\", \"Yes, we offer complimentary Wi-fi on select flights. You can check the availability during the booking process.\"),\n    (\"What is the minimum connecting time between flights?\", \"The minimum connecting time varies depending on the airport and whether your flight is international or domestic. Generally, it's recommended to allow at least 45-60 minutes for domestic connections and 60-120 minutes for international.\"),\n    (\"Do you offer duty-free shopping on international flights?\", \"Yes, we have a selection of duty-free items that you can pre-order on our website or purchase onboard on international flights.\"),\n    (\"Can I upgrade my ticket to business class?\", \"Yes, you can upgrade your ticket through the 'Manage my booking' section on our website or by contacting our customer service. The availability and costs depend on the specific flight.\"),\n    (\"Can unaccompanied minors travel on your flights?\", \"Yes, we do accommodate unaccompanied minors on our flights, with special services to ensure their safety and comfort. Please contact our customer service for more details.\"),\n    (\"What amenities do you provide in business class?\", \"In business class, you will enjoy additional legroom, reclining seats, premium meals, priority boarding and disembarkation, access to our business lounge, extra baggage allowance, and personalized service.\"),\n    (\"How much does extra baggage cost?\", \"Extra baggage costs vary based on flight route and the weight of the baggage. Please refer to our 'Extra Baggage' section on the website for specific rates.\"),\n    (\"Are there any specific rules for carrying liquids in carry-on?\", \"Yes, liquids carried in your hand luggage must be in containers of 100 ml or less and they should all fit into a single, transparent, resealable plastic bag of 20 cm x 20 cm.\"),\n    (\"What if I have a medical condition that requires special assistance during the flight?\", \"We aim to make the flight comfortable for all passengers. If you have a medical condition that may require special assistance, please contact our \u2018special services\u2019 team 48 hours before your flight.\"),\n    (\"What in-flight entertainment options are available?\", \"We offer a range of in-flight entertainment options including a selection of movies, TV shows, music, and games, available on your personal seat-back screen.\"),\n    (\"What types of payment methods do you accept?\", \"We accept credit/debit cards, PayPal, bank transfers, and various other forms of payment. The available options may vary depending on the country of departure.\"),\n    (\"How can I earn and redeem frequent flyer miles?\", \"You can earn miles for every journey you take with us or our partner airlines. These miles can be redeemed for flight tickets, upgrades, or various other benefits. To earn and redeem miles, you need to join our frequent flyer program.\"),\n    (\"Can I bring a stroller for my baby?\", \"Yes, you can bring a stroller for your baby. It can be checked in for free and will normally be given back to you at the aircraft door upon arrival.\"),\n    (\"What age does my child have to be to qualify as an unaccompanied minor?\", \"Children aged between 5 and 12 years who are traveling alone are considered unaccompanied minors. 
Our team provides special care for these children from departure to arrival.\"),\n    (\"What documents do I need to travel internationally?\", \"For international travel, you need a valid passport and may also require visas, depending on your destination and your country of residence. It's important to check the specific requirements before you travel.\"),\n    (\"What happens if I miss my flight?\", \"If you miss your flight, please contact our customer service immediately. Depending on the circumstances, you may be able to rebook on a later flight, but additional fees may apply.\"),\n    (\"Can I travel with my musical instrument?\", \"Yes, small musical instruments can be brought on board as your one carry-on item. Larger instruments must be transported in the cargo, or if small enough, a seat may be purchased for them.\"),\n    (\"Do you offer discounts for children or infants?\", \"Yes, children aged 2-11 traveling with an adult usually receive a discount on the fare. Infants under the age of 2 who do not occupy a seat can travel for a reduced fare or sometimes for free.\"),\n    (\"Is smoking allowed on your flights?\", \"No, all our flights are non-smoking for the comfort and safety of all passengers.\"),\n    (\"Do you have family seating?\", \"Yes, we offer the option to seat families together. You can select seats during booking or afterwards through the 'Manage my booking' section on the website.\"),\n    (\"Is there any discount for senior citizens?\", \"Some flights may offer a discount for senior citizens. Please check our website or contact customer service for accurate information.\"),\n    (\"What items are prohibited on your flights?\", \"Prohibited items include, but are not limited to, sharp objects, firearms, explosive materials, and certain chemicals. You can find a comprehensive list on our website under the 'Security Regulations' section.\"),\n    (\"Can I purchase a ticket for someone else?\", \"Yes, you can purchase a ticket for someone else. You'll need their correct name as it appears on their government-issued ID, and their correct travel dates.\"),\n    (\"What is the process for lost and found items on the plane?\", \"If you realize you forgot an item on the plane, report it as soon as possible to our lost and found counter. We will make every effort to locate and return your item.\"),\n    (\"Can I request a special meal?\", \"Yes, we offer a variety of special meals to accommodate dietary restrictions. Please request your preferred meal at least 48 hours prior to your flight.\"),\n    (\"Is there a weight limit for checked baggage?\", \"Yes, luggage weight limits depend on your ticket class and route. You can find the details on your ticket or by visiting our website.\"),\n    (\"Can I bring my sports equipment?\", \"Yes, certain types of sports equipment can be carried either as or in addition to your permitted baggage. Some equipment may require additional fees. It's best to check our policy on our website or contact us directly.\"),\n    (\"Do I need a visa to travel to certain countries?\", \"Yes, visa requirements depend on the country you are visiting and your nationality. We advise checking with the relevant embassy or consulate prior to travel.\"),\n    (\"How can I add extra baggage to my booking?\", \"You can add extra baggage to your booking through the 'Manage my booking' section on our website or by contacting our customer services.\"),\n    (\"Can I check-in at the airport?\", \"Yes, you can choose to check-in at the airport. 
However, we also offer online and mobile check-in, which may save you time.\"),\n    (\"How do I know if my flight is delayed or cancelled?\", \"In case of any changes to your flight, we will attempt to notify all passengers using the contact information given at the time of booking. You can also check your flight status on our website.\"),\n    (\"What is your policy on pregnant passengers?\", \"Pregnant passengers can travel up to the end of the 36th week for single pregnancies, and the end of the 32nd week for multiple pregnancies. We recommend consulting your doctor before any air travel.\"),\n    (\"Can children travel alone?\", \"Yes, children age 5 to 12 can travel alone as unaccompanied minors. We provide special care for these seats. Please contact our customer service for more information.\"),\n    (\"How can I pay for my booking?\", \"You can pay for your booking using a variety of methods including credit and debit cards, PayPal, or bank transfers. The options may vary depending on the country of departure.\"),\n]\n\n# Write data to a CSV file\nwith open('customer_service_data.csv', 'w', newline='') as file:\n    writer = csv.writer(file)\n    writer.writerow([\"prompt\", \"response\"])\n    writer.writerows(data)\n
"},{"location":"guides/fine_tuning/#making-your-data-accessible-to-llm-engine","title":"Making your data accessible to LLM Engine","text":"

Currently, data needs to be uploaded to a publicly accessible web URL so that it can be read for fine-tuning. Publicly accessible HTTP, HTTPS, and S3 URLs are currently supported. Support for privately sharing data with the LLM Engine API is coming shortly. For quick iteration, you can use tools like Pastebin or GitHub Gists to host your CSV files publicly. We created an example GitHub Gist you can see here. To use the gist, use the URL given when you click the \u201cRaw\u201d button (URL).
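
If your data lives locally, one option is to upload it to S3 yourself. Below is a minimal sketch assuming you use boto3 and control a bucket that permits public-read ACLs; the bucket and key names here are hypothetical:

Uploading a CSV to S3 in python
import boto3\n\n# Upload the CSV produced above and mark it world-readable so it can be fetched.\n# Assumes the bucket allows public-read ACLs; bucket and key names are hypothetical.\ns3 = boto3.client(\"s3\")\ns3.upload_file(\n    Filename=\"customer_service_data.csv\",\n    Bucket=\"my-bucket\",\n    Key=\"fine-tuning/customer_service_data.csv\",\n    ExtraArgs={\"ACL\": \"public-read\"},\n)\n\n# Publicly accessible URL to pass as training_file:\nprint(\"https://my-bucket.s3.amazonaws.com/fine-tuning/customer_service_data.csv\")\n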

"},{"location":"guides/fine_tuning/#launching-the-fine-tune","title":"Launching the fine-tune","text":"

Once you have uploaded your data, you can use the LLM Engine API to launch a fine-tune. You will need to specify which base model to fine-tune, the locations of the training file and optional validation data file, an optional set of hyperparameters to customize the fine-tuning behavior, and an optional suffix to append to the name of the fine-tune.

If you specify a suffix, the fine-tune will be named model:suffix:<timestamp>. If you do not, the fine-tune will be named model:<timestamp>. The timestamp will be the time the fine-tune was launched. For example, fine-tuning llama-7b with the suffix airlines might produce a model named llama-7b:airlines:2023-07-17-08-30-45.

Hyper-parameters for fine-tune

  • `lr`: Peak learning rate used during fine-tuning. It decays with a cosine schedule afterward. (Default: 2e-5)
  • `warmup_ratio`: Ratio of training steps used for learning rate warmup. (Default: 0.03)
  • `epochs`: Number of fine-tuning epochs. (Default: 5)
  • `weight_decay`: Regularization penalty applied to learned weights. (Default: 0.001)
  • `max_seq_length`: Maximum number of tokens per sequence in the dataset. (Default: 1024)

Create a fine-tune in python
from llmengine import FineTune\n\nresponse = FineTune.create(\n    model=\"llama-7b\",\n    training_file=\"s3://my-bucket/path/to/training-file.csv\",\n)\n\nprint(response.json())\n
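
To override any of the defaults listed above, pass a hyperparameters dict to FineTune.create. A minimal sketch follows; the hyperparameter values shown are illustrative, not recommendations:

Create a fine-tune with custom hyperparameters in python
from llmengine import FineTune\n\n# Hyperparameter values below are illustrative, not recommendations.\nresponse = FineTune.create(\n    model=\"llama-7b\",\n    training_file=\"s3://my-bucket/path/to/training-file.csv\",\n    hyperparameters={\"epochs\": \"3\", \"lr\": \"1e-5\"},\n    suffix=\"airlines\",\n)\n\nprint(response.json())\n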

See the Model Zoo to see which models have fine-tuning support.

Once the fine-tune is launched, you can also get the status of your fine-tune.
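
A minimal sketch, using the fine_tune_id returned by FineTune.create:

Checking the status of a fine-tune in python
from llmengine import FineTune\n\nresponse = FineTune.get(fine_tune_id=\"ft_abc123\")\nprint(response.json())\n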

"},{"location":"guides/fine_tuning/#making-inference-calls-to-your-fine-tune","title":"Making inference calls to your fine-tune","text":"

Once your fine-tune is finished, you will be able to start making inference requests to the model. You can use the fine_tuned_model returned from your FineTune.get API call to reference your fine-tuned model in the Completions API. Alternatively, you can list available LLMs with Model.list to find the name of your fine-tuned model, as in the sketch below. See the Completion API for more details. Note that you must wait until your fine-tune is complete before you can plug it into the Completions API; you can check its status with FineTune.get.
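
As referenced above, a quick way to find the name is to print the names of all models from Model.list; this sketch relies only on the documented model_endpoints field of the response:

Finding your fine-tuned model's name in python
from llmengine import Model\n\nresponse = Model.list()\nprint([endpoint.name for endpoint in response.model_endpoints])\n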

Inference with a fine-tuned model in python
from llmengine import Completion\n\nresponse = Completion.create(\n    model=\"llama-7b:airlines:2023-07-17-08-30-45\",\n    prompt=\"Do you offer in-flight Wi-fi?\",\n    max_new_tokens=100,\n    temperature=0.2,\n)\nprint(response.json())\n
"},{"location":"guides/rate_limits/","title":"Overview","text":""},{"location":"guides/rate_limits/#what-are-rate-limits","title":"What are rate limits?","text":"

A rate limit is a restriction that an API imposes on the number of times a user or client can access the server within a specified period of time.

"},{"location":"guides/rate_limits/#how-do-i-know-if-i-am-rate-limited","title":"How do I know if I am rate limited?","text":"

Per standard HTTP practices, your request will receive a response with an HTTP status code of 429, Too Many Requests.

"},{"location":"guides/rate_limits/#what-are-the-rate-limits-for-our-api","title":"What are the rate limits for our API?","text":"

The LLM Engine API is currently in a preview mode, and therefore we currently do not have any advertised rate limits. As the API moves towards a production release, we will update this section with specific rate limits. For now, the API will return HTTP 429 on an as-needed basis.

"},{"location":"guides/rate_limits/#error-mitigation","title":"Error mitigation","text":""},{"location":"guides/rate_limits/#retrying-with-exponential-backoff","title":"Retrying with exponential backoff","text":"

One easy way to avoid rate limit errors is to automatically retry requests with a random exponential backoff. Retrying with exponential backoff means performing a short sleep when a rate limit error is hit, then retrying the unsuccessful request. If the request is still unsuccessful, the sleep length is increased and the process is repeated. This continues until the request is successful or until a maximum number of retries is reached. This approach has many benefits:

  • Automatic retries mean you can recover from rate limit errors without crashes or missing data
  • Exponential backoff means that your first retries can be tried quickly, while still benefiting from longer delays if your first few retries fail
  • Adding random jitter to the delay helps prevent retries from all hitting at the same time.

Below are a few example solutions for Python that use exponential backoff.

"},{"location":"guides/rate_limits/#example-1-using-the-tenacity-library","title":"Example #1: Using the tenacity library","text":"

Tenacity is an Apache 2.0 licensed general-purpose retrying library, written in Python, to simplify the task of adding retry behavior to just about anything. To add exponential backoff to your requests, you can use the tenacity.retry decorator. The below example uses the tenacity.wait_random_exponential function to add random exponential backoff to a request.

Exponential backoff in python
import llmengine\nfrom tenacity import (\n    retry,\n    stop_after_attempt,\n    wait_random_exponential,\n)  # for exponential backoff\n@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))\ndef completion_with_backoff(**kwargs):\n    return llmengine.Completion.create(**kwargs)\n\ncompletion_with_backoff(model=\"llama-7b\", prompt=\"Why is the sky blue?\")\n
"},{"location":"guides/rate_limits/#example-2-using-the-backoff-library","title":"Example #2: Using the backoff library","text":"

Another Python library that provides function decorators for backoff and retry is backoff:

Decorators for backoff and retry in python
import llmengine\nimport backoff\n\n@backoff.on_exception(backoff.expo, llmengine.errors.RateLimitExceededError)\ndef completions_with_backoff(**kwargs):\n    return llmengine.Completion.create(**kwargs)\n\ncompletions_with_backoff(model=\"llama-7b\", prompt=\"Why is the sky blue?\")\n
"},{"location":"guides/token_streaming/","title":"Token streaming","text":"

The Completions APIs support a stream boolean parameter that, when True, will return a streamed response of token-by-token server-sent events (SSEs) rather than waiting to receive the full response when model generation has finished. This decreases the latency before you start receiving a response.

The response will consist of SSEs of the form {\"token\": dict, \"generated_text\": str | null, \"details\": dict | null}, where the dictionary for each token will contain log probability information in addition to the generated string; the generated_text field will be null for all but the last SSE, for which it will contain the full generated response.
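
As an illustration, here is a minimal sketch of assembling the full response from raw SSE payloads of the shape described above. The sample payloads (including the keys inside the token dictionary) are hypothetical, and in practice the Python client parses these events for you:

Parsing streamed SSE payloads in python
import json\n\n# Hypothetical raw SSE data payloads of the form described above.\nevents = [\n    '{\"token\": {\"text\": \"Hello\", \"logprob\": -0.1}, \"generated_text\": null, \"details\": null}',\n    '{\"token\": {\"text\": \" world\", \"logprob\": -0.2}, \"generated_text\": \"Hello world\", \"details\": null}',\n]\n\nfull_text = None\nfor line in events:\n    payload = json.loads(line)\n    print(payload[\"token\"][\"text\"], end=\"\")\n    if payload[\"generated_text\"] is not None:\n        # Only the final event carries the full generated response.\n        full_text = payload[\"generated_text\"]\nprint()\n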

"}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":""},{"location":"#llm-engine","title":"LLM Engine","text":"

The open source engine for inference and fine-tuning of Large Language Models.

LLM Engine is the easiest way to customize and serve LLMs.

LLMs can be accessed via Scale's hosted version or by using the helm charts in this repository to run model inference and fine-tuning in your own infrastructure.

"},{"location":"#quick-install","title":"Quick Install","text":"Install the python package
pip install scale-llm-engine\n
"},{"location":"#about","title":"About","text":"

Foundation models are emerging as the building blocks of AI. However, deploying these models to the cloud and fine-tuning them are expensive operations that require infrastructure and ML expertise. These deployments are also difficult to maintain over time as new models are released and new techniques for both inference and fine-tuning become available.

LLM Engine is a Python library, CLI, and Helm chart that provides everything you need to serve and fine-tune foundation models, whether you use Scale's hosted infrastructure or do it in your own cloud infrastructure using Kubernetes.

"},{"location":"#key-features","title":"Key Features","text":"

Ready-to-use APIs for your favorite models: Deploy and serve open-source foundation models, including LLaMA, MPT, and Falcon. Use Scale-hosted models or deploy to your own infrastructure.

Fine-tune your favorite models: Fine-tune open-source foundation models like LLaMA and MPT with your own data for optimized performance.

Optimized Inference: LLM Engine provides inference APIs for streaming responses and dynamically batching inputs for higher throughput and lower latency.

Open-Source Integrations: Deploy any Hugging Face model with a single command.

Deploy from any Docker image: Turn any Docker image into an auto-scaling deployment with simple APIs.

"},{"location":"#features-coming-soon","title":"Features Coming Soon","text":"

k8s Installation Documentation: We are working hard to document installation and maintenance of inference and fine-tuning functionality on your own infrastructure. For now, our documentation covers using our client libraries to access Scale's hosted infrastructure.

Fast Cold-Start Times: To prevent GPUs from idling, LLM Engine automatically scales your model to zero when it's not in use and scales up within seconds, even for large foundation models.

Cost Optimization: Deploy AI models more cheaply than commercial alternatives, including cold-start and warm-down times.

"},{"location":"contributing/","title":"Contributing to LLM Engine","text":""},{"location":"contributing/#updating-llm-engine-documentation","title":"Updating LLM Engine Documentation","text":"

LLM Engine leverages mkdocs to create beautiful, community-oriented documentation.

"},{"location":"contributing/#step-1-clone-the-repository","title":"Step 1: Clone the Repository","text":"

Clone/Fork the LLM Engine Repository. Our documentation lives in the docs folder.

"},{"location":"contributing/#step-2-install-the-dependencies","title":"Step 2: Install the Dependencies","text":"

Dependencies are located in requirements-docs.txt; install them with

pip install -r requirements-docs.txt\n
"},{"location":"contributing/#step-3-run-locally","title":"Step 3: Run Locally","text":"

To run the documentation service locally, execute the following command:

mkdocs serve\n

This should kick off a locally running instance on http://127.0.0.1:8000/.

As you edit the content in the docs folder, the site will be automatically reloaded on each file save.

"},{"location":"contributing/#step-4-editing-navigation-and-settings","title":"Step 4: Editing Navigation and Settings","text":"

If you are less familiar with mkdocs, note that in addition to the markdown content in the docs folder, there is a top-level mkdocs.yml file that defines the navigation pane and other website settings. If you don't see your page where you think it should be, double-check the .yml file.

"},{"location":"contributing/#step-5-building-and-deploying","title":"Step 5: Building and Deploying","text":"

CircleCI (via .circleci/config.yml) handles the building and deployment of our documentation service for us.

"},{"location":"faq/","title":"Frequently Asked Questions","text":""},{"location":"getting_started/","title":"Getting Started","text":"

The fastest way to get started with LLM Engine is to use the python client in this repository to run inference and fine-tuning on Scale's infrastructure. This path does not require you to install anything on your infrastructure, and Scale's free tier gives you access to experimentation using open source LLMs.

To start with, install LLM Engine via pip or conda:

pip / conda
pip install scale-llm-engine\n
conda install scale-llm-engine -c conda-forge\n
"},{"location":"getting_started/#scale-api-keys","title":"Scale API Keys","text":"

Next, you need a Scale Spellbook API key.

"},{"location":"getting_started/#retrieving-your-api-key","title":"Retrieving your API Key","text":"

To retrieve your API key, head to Scale Spellbook where you will get an API key on the settings page.

Different API Keys for different Scale Products

If you have leveraged Scale's platform for annotation work in the past, please note that your Spellbook API key will be different from the Scale Annotation API key. You will want to create a Spellbook API key before getting started.

"},{"location":"getting_started/#set-your-api-key","title":"Set your API Key","text":"

LLM Engine uses environment variables to access your API key.

Set this API key as the SCALE_API_KEY environment variable by running the following command in your terminal before you run your Python application.

export SCALE_API_KEY=\"[Your API key]\"\n

You can also add the line above to your .zshrc or .bash_profile so it's automatically set for future sessions.
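
Alternatively, you can set the variable from within Python before using the client; a minimal sketch:

Setting the API key in python
import os\n\n# Applies to the current process only; replace with your actual key.\nos.environ[\"SCALE_API_KEY\"] = \"[Your API key]\"\n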

"},{"location":"getting_started/#example-code","title":"Example Code","text":""},{"location":"getting_started/#sample-completion","title":"Sample Completion","text":"

With your API key set, you can now send LLM Engine requests using the Python client:

from llmengine import Completion\n\nresponse = Completion.create(\n    model=\"falcon-7b-instruct\",\n    prompt=\"I'm opening a pancake restaurant that specializes in unique pancake shapes, colors, and flavors. List 3 quirky names I could name my restaurant.\",\n    max_new_tokens=100,\n    temperature=0.2,\n)\n\nprint(response.outputs[0].text)\n
"},{"location":"getting_started/#with-streaming","title":"With Streaming","text":"
from llmengine import Completion\nimport sys\nstream = Completion.create(\n    model=\"falcon-7b-instruct\",\n    prompt=\"Give me a 200 word summary on the current economic events in the US.\",\n    max_new_tokens=1000,\n    temperature=0.2,\n    stream=True\n)\n\nfor response in stream:\n    if response.output:\n        print(response.output.text, end=\"\")\n        sys.stdout.flush()\n
"},{"location":"model_zoo/","title":"Public Model Zoo","text":"

Scale hosts the following models in a model zoo:

Model Name           Inference APIs Available  Fine-tuning APIs Available
llama-7b             \u2705                    \u2705
falcon-7b            \u2705
falcon-7b-instruct   \u2705
falcon-40b           \u2705
falcon-40b-instruct  \u2705
mpt-7b               \u2705
mpt-7b-instruct      \u2705                    \u2705
flan-t5-xxl          \u2705
"},{"location":"model_zoo/#usage","title":"Usage","text":"

Each of these models can be used with the Completion API.

The specified models can be fine-tuned with the FineTune API.

More information about the models can be found using the Model API.

"},{"location":"api/data_types/","title":"\ud83d\udc0d Python Client Data Type Reference","text":""},{"location":"api/data_types/#llmengine.CompletionOutput","title":"CompletionOutput","text":"

Bases: BaseModel

Represents the output of a completion request to a model.

"},{"location":"api/data_types/#llmengine.data_types.CompletionOutput.text","title":"text instance-attribute","text":"
text: str\n

The text of the completion.

"},{"location":"api/data_types/#llmengine.data_types.CompletionOutput.num_completion_tokens","title":"num_completion_tokens instance-attribute","text":"
num_completion_tokens: int\n

Number of tokens in the completion.

"},{"location":"api/data_types/#llmengine.CompletionStreamOutput","title":"CompletionStreamOutput","text":"

Bases: BaseModel

"},{"location":"api/data_types/#llmengine.data_types.CompletionStreamOutput.text","title":"text instance-attribute","text":"
text: str\n

The text of the completion.

"},{"location":"api/data_types/#llmengine.data_types.CompletionStreamOutput.finished","title":"finished instance-attribute","text":"
finished: bool\n

Whether the completion is finished.

"},{"location":"api/data_types/#llmengine.data_types.CompletionStreamOutput.num_completion_tokens","title":"num_completion_tokens class-attribute instance-attribute","text":"
num_completion_tokens: Optional[int] = None\n

Number of tokens in the completion.

"},{"location":"api/data_types/#llmengine.CompletionSyncResponse","title":"CompletionSyncResponse","text":"

Bases: BaseModel

Response object for a synchronous prompt completion.

"},{"location":"api/data_types/#llmengine.data_types.CompletionSyncResponse.output","title":"output instance-attribute","text":"
output: CompletionOutput\n

Completion output.

"},{"location":"api/data_types/#llmengine.data_types.CompletionSyncResponse.request_id","title":"request_id instance-attribute","text":"
request_id: str\n

Unique ID of request.

"},{"location":"api/data_types/#llmengine.CompletionStreamResponse","title":"CompletionStreamResponse","text":"

Bases: BaseModel

Response object for a stream prompt completion task.

"},{"location":"api/data_types/#llmengine.data_types.CompletionStreamResponse.output","title":"output class-attribute instance-attribute","text":"
output: Optional[CompletionStreamOutput] = None\n

Completion output.

"},{"location":"api/data_types/#llmengine.data_types.CompletionStreamResponse.request_id","title":"request_id instance-attribute","text":"
request_id: str\n

Unique ID of request.

"},{"location":"api/data_types/#llmengine.CreateFineTuneRequest","title":"CreateFineTuneRequest","text":"

Bases: BaseModel

Request object for creating a FineTune.

"},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneRequest.model","title":"model class-attribute instance-attribute","text":"
model: str = Field(\n    ...,\n    description=\"Identifier of base model to train from.\",\n)\n

Identifier of base model to train from.

"},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneRequest.training_file","title":"training_file class-attribute instance-attribute","text":"
training_file: str = Field(\n    ...,\n    description=\"Path to file of training dataset. Dataset must be a csv with columns 'prompt' and 'response'.\",\n)\n

Path to file of training dataset. Dataset must be a csv with columns 'prompt' and 'response'.

"},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneRequest.validation_file","title":"validation_file class-attribute instance-attribute","text":"
validation_file: Optional[str] = Field(\n    default=None,\n    description=\"Path to file of validation dataset. Has the same format as training_file. If not provided, we will generate a split from the training dataset.\",\n)\n

Path to file of validation dataset. Has the same format as training_file. If not provided, we will generate a split from the training dataset.

"},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneRequest.hyperparameters","title":"hyperparameters class-attribute instance-attribute","text":"
hyperparameters: Optional[Dict[str, Any]] = Field(\n    default=None,\n    description=\"Hyperparameters to pass in to training job.\",\n)\n

Hyperparameters to pass in to training job.

"},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneRequest.suffix","title":"suffix class-attribute instance-attribute","text":"
suffix: Optional[str] = Field(\n    default=None,\n    description=\"Optional user-provided identifier suffix for the fine-tuned model.\",\n)\n

Optional user-provided identifier suffix for the fine-tuned model.

"},{"location":"api/data_types/#llmengine.CreateFineTuneResponse","title":"CreateFineTuneResponse","text":"

Bases: BaseModel

Response object for creating a FineTune.

"},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneResponse.fine_tune_id","title":"fine_tune_id class-attribute instance-attribute","text":"
fine_tune_id: str = Field(\n    ..., description=\"ID of the created fine-tuning job.\"\n)\n

The ID of the FineTune.

"},{"location":"api/data_types/#llmengine.GetFineTuneResponse","title":"GetFineTuneResponse","text":"

Bases: BaseModel

Response object for retrieving a FineTune.

"},{"location":"api/data_types/#llmengine.data_types.GetFineTuneResponse.fine_tune_id","title":"fine_tune_id class-attribute instance-attribute","text":"
fine_tune_id: str = Field(\n    ..., description=\"ID of the requested job.\"\n)\n

The ID of the FineTune.

"},{"location":"api/data_types/#llmengine.data_types.GetFineTuneResponse.status","title":"status class-attribute instance-attribute","text":"
status: BatchJobStatus = Field(\n    ..., description=\"Status of the requested job.\"\n)\n

The status of the FineTune job.

"},{"location":"api/data_types/#llmengine.ListFineTunesResponse","title":"ListFineTunesResponse","text":"

Bases: BaseModel

Response object for listing FineTunes.

"},{"location":"api/data_types/#llmengine.data_types.ListFineTunesResponse.jobs","title":"jobs class-attribute instance-attribute","text":"
jobs: List[GetFineTuneResponse] = Field(\n    ...,\n    description=\"List of fine-tuning jobs and their statuses.\",\n)\n

A list of FineTunes, represented as GetFineTuneResponses.

"},{"location":"api/data_types/#llmengine.CancelFineTuneResponse","title":"CancelFineTuneResponse","text":"

Bases: BaseModel

Response object for cancelling a FineTune.

"},{"location":"api/data_types/#llmengine.data_types.CancelFineTuneResponse.success","title":"success class-attribute instance-attribute","text":"
success: bool = Field(\n    ..., description=\"Whether cancellation was successful.\"\n)\n

Whether the cancellation succeeded.

"},{"location":"api/data_types/#llmengine.GetLLMEndpointResponse","title":"GetLLMEndpointResponse","text":"

Bases: BaseModel

Response object for retrieving a Model.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.name","title":"name class-attribute instance-attribute","text":"
name: str = Field(\n    description=\"The name of the model. Use this for making inference requests to the model.\"\n)\n

The name of the model. Use this for making inference requests to the model.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.source","title":"source class-attribute instance-attribute","text":"
source: LLMSource = Field(\n    description=\"The source of the model, e.g. Hugging Face.\"\n)\n

The source of the model, e.g. Hugging Face.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.inference_framework","title":"inference_framework class-attribute instance-attribute","text":"
inference_framework: LLMInferenceFramework = Field(\n    description=\"The inference framework used by the model.\"\n)\n

The inference framework used by the model.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.id","title":"id class-attribute instance-attribute","text":"
id: Optional[str] = Field(\n    default=None,\n    description=\"(For self-hosted users) The autogenerated ID of the model.\",\n)\n

(For self-hosted users) The autogenerated ID of the model.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.model_name","title":"model_name class-attribute instance-attribute","text":"
model_name: Optional[str] = Field(\n    default=None,\n    description=\"(For self-hosted users) For fine-tuned models, the base model. For base models, this will be the same as `name`.\",\n)\n

(For self-hosted users) For fine-tuned models, the base model. For base models, this will be the same as name.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.inference_framework_tag","title":"inference_framework_tag class-attribute instance-attribute","text":"
inference_framework_tag: Optional[str] = Field(\n    default=None,\n    description=\"(For self-hosted users) The Docker image tag used to run the model.\",\n)\n

(For self-hosted users) The Docker image tag used to run the model.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.num_shards","title":"num_shards class-attribute instance-attribute","text":"
num_shards: Optional[int] = Field(\n    default=None,\n    description=\"(For self-hosted users) The number of shards.\",\n)\n

(For self-hosted users) The number of shards.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.quantize","title":"quantize class-attribute instance-attribute","text":"
quantize: Optional[Quantization] = Field(\n    default=None,\n    description=\"(For self-hosted users) The quantization method.\",\n)\n

(For self-hosted users) The quantization method.

"},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.spec","title":"spec class-attribute instance-attribute","text":"
spec: Optional[GetModelEndpointResponse] = Field(\n    default=None,\n    description=\"(For self-hosted users) Model endpoint details.\",\n)\n

(For self-hosted users) Model endpoint details.

"},{"location":"api/data_types/#llmengine.ListLLMEndpointsResponse","title":"ListLLMEndpointsResponse","text":"

Bases: BaseModel

Response object for listing Models.

"},{"location":"api/data_types/#llmengine.data_types.ListLLMEndpointsResponse.model_endpoints","title":"model_endpoints class-attribute instance-attribute","text":"
model_endpoints: List[GetLLMEndpointResponse] = Field(\n    ..., description=\"The list of models.\"\n)\n

A list of Models, represented as GetLLMEndpointResponses.

"},{"location":"api/data_types/#llmengine.DeleteLLMEndpointResponse","title":"DeleteLLMEndpointResponse","text":"

Bases: BaseModel

Response object for deleting a Model.

"},{"location":"api/data_types/#llmengine.data_types.DeleteLLMEndpointResponse.deleted","title":"deleted class-attribute instance-attribute","text":"
deleted: bool = Field(\n    ..., description=\"Whether deletion was successful.\"\n)\n

Whether the deletion succeeded.

"},{"location":"api/error_handling/","title":"Error handling","text":"

LLM Engine uses conventional HTTP response codes to indicate the success or failure of an API request. In general: codes in the 2xx range indicate success. Codes in the 4xx range indicate a request that failed given the information provided (e.g. a given Model was not found, or an invalid temperature was specified). Codes in the 5xx range indicate an error with the LLM Engine servers.

In the Python client, errors are presented via a set of corresponding Exception classes, which should be caught and handled by the user accordingly.
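
For instance, here is a minimal sketch of catching two of the exceptions below around a completion call; the handling shown is illustrative, not prescriptive:

Handling client errors in python
from llmengine import Completion\nfrom llmengine.errors import BadRequestError, RateLimitExceededError\n\ntry:\n    response = Completion.create(\n        model=\"llama-7b\",\n        prompt=\"Hello, my name is\",\n        max_new_tokens=10,\n    )\n    print(response.json())\nexcept BadRequestError as e:\n    # Invalid inputs; fix the request before retrying.\n    print(f\"Bad request: {e}\")\nexcept RateLimitExceededError as e:\n    # Too many requests; back off and retry.\n    print(f\"Rate limited: {e}\")\n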

"},{"location":"api/error_handling/#llmengine.errors.BadRequestError","title":"BadRequestError","text":"
BadRequestError(message: str)\n

Bases: Exception

Corresponds to HTTP 400. Indicates that the request had inputs that were invalid. The user should not attempt to retry the request without changing the inputs.

"},{"location":"api/error_handling/#llmengine.errors.UnauthorizedError","title":"UnauthorizedError","text":"
UnauthorizedError(message: str)\n

Bases: Exception

Corresponds to HTTP 401. This means that no valid API key was provided.

"},{"location":"api/error_handling/#llmengine.errors.NotFoundError","title":"NotFoundError","text":"
NotFoundError(message: str)\n

Bases: Exception

Corresponds to HTTP 404. This means that the resource (e.g. a Model, FineTune, etc.) could not be found. Note that this can also be returned in some cases where the object might exist, but the user does not have access to the object. This is done to avoid leaking information about the existence or nonexistence of said object that the user does not have access to.

"},{"location":"api/error_handling/#llmengine.errors.RateLimitExceededError","title":"RateLimitExceededError","text":"
RateLimitExceededError(message: str)\n

Bases: Exception

Corresponds to HTTP 429. Too many requests hit the API too quickly. We recommend an exponential backoff for retries.

"},{"location":"api/error_handling/#llmengine.errors.ServerError","title":"ServerError","text":"
ServerError(status_code: int, message: str)\n

Bases: Exception

Corresponds to HTTP 5xx errors on the server.

"},{"location":"api/langchain/","title":"\ud83e\udd9c Langchain","text":"

Coming soon!

"},{"location":"api/python_client/","title":"\ud83d\udc0d Python Client API Reference","text":""},{"location":"api/python_client/#llmengine.Completion","title":"Completion","text":"

Bases: APIEngine

Completion API. This API is used to generate text completions.

Language models are trained to understand natural language and predict text outputs as a response to their inputs. The inputs are called prompts and the outputs are referred to as completions. LLMs take the input prompts and chunk them into smaller units called tokens to process and generate language. Tokens may include trailing spaces and even sub-words; this process is language dependent.

The Completion API can be run either synchronously or asynchronously (via Python asyncio). For each of these modes, you can also choose whether to stream token responses or not.

"},{"location":"api/python_client/#llmengine.completion.Completion.create","title":"create classmethod","text":"
create(\n    model: str,\n    prompt: str,\n    max_new_tokens: int = 20,\n    temperature: float = 0.2,\n    timeout: int = 10,\n    stream: bool = False,\n) -> Union[\n    CompletionSyncResponse,\n    Iterator[CompletionStreamResponse],\n]\n

Creates a completion for the provided prompt and parameters synchronously.

This API can be used to get the LLM to generate a completion synchronously. It takes as parameters the model (see Model Zoo) and the prompt. Optionally it takes max_new_tokens, temperature, timeout and stream. It returns a CompletionSyncResponse if stream=False or an iterator of CompletionStreamResponse with request_id and outputs fields.

Parameters:

Name Type Description Default model str

Name of the model to use. See Model Zoo for a list of Models that are supported.

required prompt str

The prompt to generate completions for, encoded as a string.

required max_new_tokens int

The maximum number of tokens to generate in the completion.

The token count of your prompt plus max_new_tokens cannot exceed the model's context length. See Model Zoo for information on each supported model's context length.

20 temperature float

What sampling temperature to use, in the range (0, 1]. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

0.2 timeout int

Timeout in seconds. This is the maximum amount of time you are willing to wait for a response.

10 stream bool

Whether to stream the response. If true, the return type is an Iterator[CompletionStreamResponse]. Otherwise, the return type is a CompletionSyncResponse. When streaming, tokens will be sent as data-only server-sent events.

False

Returns:

Name Type Description response Union[CompletionSyncResponse, Iterator[CompletionStreamResponse]]

The generated response (if stream=False) or iterator of response chunks (if stream=True)

Token streaming can be used to reduce perceived latency for applications:

Synchronous completion without token streaming in python / Response in json
from llmengine import Completion\n\nresponse = Completion.create(\n    model=\"llama-7b\",\n    prompt=\"Hello, my name is\",\n    max_new_tokens=10,\n    temperature=0.2,\n)\nprint(response.json())\n
{\n    \"request_id\": \"0123456789\",\n    \"outputs\":\n    [\n        {\n            \"text\": \"_______ and I am a _______\",\n            \"num_completion_tokens\": 10\n}\n    ],\n    \"traceback\": null\n}\n

Here is how applications can use streaming:

Synchronous completion with token streaming in python / Response in json
from llmengine import Completion\n\nstream = Completion.create(\n    model=\"llama-7b\",\n    prompt=\"why is the sky blue?\",\n    max_new_tokens=5,\n    temperature=0.2,\n    stream=True,\n)\n\nfor response in stream:\n    if response.output:\n        print(response.json())\n
{\"request_id\": \"0123456789\", \"output\": {\"text\": \"\\n\", \"finished\": false, \"num_completion_tokens\": 1 } }\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \"I\", \"finished\": false, \"num_completion_tokens\": 2 } }\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \" don\", \"finished\": false, \"num_completion_tokens\": 3 } }\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \"\u2019\", \"finished\": false, \"num_completion_tokens\": 4 } }\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \"t\", \"finished\": true, \"num_completion_tokens\": 5 } }\n
"},{"location":"api/python_client/#llmengine.completion.Completion.acreate","title":"acreate async classmethod","text":"
acreate(\n    model: str,\n    prompt: str,\n    max_new_tokens: int = 20,\n    temperature: float = 0.2,\n    timeout: int = 10,\n    stream: bool = False,\n) -> Union[\n    CompletionSyncResponse,\n    AsyncIterable[CompletionStreamResponse],\n]\n

Creates a completion for the provided prompt and parameters asynchronously (with asyncio).

This API can be used to get the LLM to generate a completion asynchronously. It takes as parameters the model (see Model Zoo) and the prompt. Optionally it takes max_new_tokens, temperature, timeout and stream. It returns a CompletionSyncResponse if stream=False or an async iterator of CompletionStreamResponse with request_id and outputs fields.

Parameters:

Name Type Description Default model str

Name of the model to use. See Model Zoo for a list of Models that are supported.

required prompt str

The prompt to generate completions for, encoded as a string.

required max_new_tokens int

The maximum number of tokens to generate in the completion.

The token count of your prompt plus max_new_tokens cannot exceed the model's context length. See Model Zoo for information on each supported model's context length.

20 temperature float

What sampling temperature to use, in the range (0, 1]. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

0.2 timeout int

Timeout in seconds. This is the maximum amount of time you are willing to wait for a response.

10 stream bool

Whether to stream the response. If true, the return type is an AsyncIterable[CompletionStreamResponse]. Otherwise, the return type is a CompletionSyncResponse. When streaming, tokens will be sent as data-only server-sent events.

False

Returns:

Name Type Description response Union[CompletionSyncResponse, AsyncIterable[CompletionStreamResponse]]

The generated response (if stream=False) or iterator of response chunks (if stream=True)

Token streaming can be used to reduce perceived latency for applications:

Asynchronous completion without token streaming in python / Response in json
import asyncio\nfrom llmengine import Completion\n\nasync def main():\n    response = await Completion.acreate(\n        model=\"llama-7b\",\n        prompt=\"Hello, my name is\",\n        max_new_tokens=10,\n        temperature=0.2,\n    )\n    print(response.json())\n\nasyncio.run(main())\n
{\n    \"request_id\": \"b1b2c3d4e5f6g7h8i9j0\",\n    \"outputs\":\n    [\n        {\n            \"text\": \"_______, and I am a _____\",\n            \"num_completion_tokens\": 10\n}\n    ],\n}\n

Here is how applications can use streaming:

Asynchronous completion with token streaming in python / Response in json
import asyncio\nfrom llmengine import Completion\n\nasync def main():\n    stream = await Completion.acreate(\n        model=\"llama-7b\",\n        prompt=\"why is the sky blue?\",\n        max_new_tokens=5,\n        temperature=0.2,\n        stream=True,\n    )\n\n    async for response in stream:\n        if response.output:\n            print(response.json())\n\nasyncio.run(main())\n
{\"request_id\": \"0123456789\", \"output\": {\"text\": \"\\n\", \"finished\": false, \"num_completion_tokens\": 1}}\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \"I\", \"finished\": false, \"num_completion_tokens\": 2}}\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \" think\", \"finished\": false, \"num_completion_tokens\": 3}}\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \" the\", \"finished\": false, \"num_completion_tokens\": 4}}\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \" sky\", \"finished\": true, \"num_completion_tokens\": 5}}\n
"},{"location":"api/python_client/#llmengine.FineTune","title":"FineTune","text":"

Bases: APIEngine

FineTune API. This API is used to fine-tune models.

Fine-tuning is a process where the LLM is further trained on a task-specific dataset, allowing the model to adjust its parameters to better align with the task at hand. Fine-tuning is a supervised training phase, where prompt/response pairs are provided to optimize the performance of the LLM.

LLM Engine provides APIs to create fine-tunes on a base model with training & validation datasets. APIs are also provided to list, cancel and retrieve fine-tuning jobs.

Creating a fine-tune will end with the creation of a Model, which you can view using Model.get(model_name) or delete using Model.delete(model_name).

"},{"location":"api/python_client/#llmengine.fine_tuning.FineTune.create","title":"create classmethod","text":"
create(\n    model: str,\n    training_file: str,\n    validation_file: Optional[str] = None,\n    hyperparameters: Optional[Dict[str, str]] = None,\n    suffix: Optional[str] = None,\n) -> CreateFineTuneResponse\n

Creates a job that fine-tunes a specified model from a given dataset.

This API can be used to fine-tune a model. The model is the name of the base model to fine-tune (see Model Zoo for available models). The training file should consist of prompt and response pairs. Your data must be formatted as a CSV file that includes two columns: prompt and response. A maximum of 100,000 rows of data is currently supported. At least 200 rows of data are recommended to start seeing benefits from fine-tuning.

Parameters:

  • model (`str`, required): The name of the base model to fine-tune. See Model Zoo for the list of available models to fine-tune.

  • training_file (`str`, required): Publicly accessible URL to a CSV file for training.

  • validation_file (`Optional[str]`, default None): Publicly accessible URL to a CSV file for validation.

  • hyperparameters (`Optional[Dict[str, str]]`, default None): A dict of hyperparameters to customize fine-tuning behavior.

    Currently supported hyperparameters:

      • lr: Peak learning rate used during fine-tuning. It decays with a cosine schedule afterward. (Default: 2e-5)
      • warmup_ratio: Ratio of training steps used for learning rate warmup. (Default: 0.03)
      • epochs: Number of fine-tuning epochs. (Default: 5)
      • weight_decay: Regularization penalty applied to learned weights. (Default: 0.001)
      • max_seq_length: Maximum number of tokens per sequence in the dataset. (Default: 1024)

  • suffix (`Optional[str]`, default None): A string that will be added to your fine-tuned model name.

Returns:

  • CreateFineTuneResponse: an object that contains the ID of the created fine-tuning job

Here is an example script to create a 5-row CSV of properly formatted data for fine-tuning an airline question answering bot:

Formatting data in python
import csv\n# Define data\ndata = [\n  (\"What is your policy on carry-on luggage?\", \"Our policy allows each passenger to bring one piece of carry-on luggage and one personal item such as a purse or briefcase. The maximum size for carry-on luggage is 22 x 14 x 9 inches.\"),\n  (\"How can I change my flight?\", \"You can change your flight through our website or mobile app. Go to 'Manage my booking' section, enter your booking reference and last name, then follow the prompts to change your flight.\"),\n  (\"What meals are available on my flight?\", \"We offer a variety of meals depending on the flight's duration and route. These can range from snacks and light refreshments to full-course meals on long-haul flights. Specific meal options can be viewed during the booking process.\"),\n  (\"How early should I arrive at the airport before my flight?\", \"We recommend arriving at least two hours before domestic flights and three hours before international flights.\"),\n  (\"Can I select my seat in advance?\", \"Yes, you can select your seat during the booking process or afterwards via the 'Manage my booking' section on our website or mobile app.\"),\n]\n\n# Write data to a CSV file\nwith open('customer_service_data.csv', 'w', newline='') as file:\n    writer = csv.writer(file)\n    writer.writerow([\"prompt\", \"response\"])\n    writer.writerows(data)\n

Example code for fine-tuning:

Fine-tuning in python / Response in json
from llmengine import FineTune\n\nresponse = FineTune.create(\n    model=\"llama-7b\",\n    training_file=\"https://my-bucket.s3.us-west-2.amazonaws.com/path/to/training-file.csv\",\n)\n\nprint(response.json())\n
{\n    \"fine_tune_id\": \"ft_abc123\"\n}\n
"},{"location":"api/python_client/#llmengine.fine_tuning.FineTune.get","title":"get classmethod","text":"
get(fine_tune_id: str) -> GetFineTuneResponse\n

Get status of a fine-tuning job.

This API can be used to get the status of an already running fine-tuning job. It takes the fine_tune_id as its single parameter and returns a GetFineTuneResponse object with the id and status (PENDING, STARTED, UNDEFINED, FAILURE or SUCCESS).

Parameters:

  • fine_tune_id (`str`, required): ID of the fine-tuning job

Returns:

  • GetFineTuneResponse: an object that contains the ID and status of the requested job

Getting status of fine-tuning in python / Response in json
from llmengine import FineTune\n\nresponse = FineTune.get(\n    fine_tune_id=\"ft_abc123\",\n)\n\nprint(response.json())\n
{\n    \"fine_tune_id\": \"ft_abc123\",\n    \"status\": \"STARTED\"\n}\n
"},{"location":"api/python_client/#llmengine.fine_tuning.FineTune.list","title":"list classmethod","text":"
list() -> ListFineTunesResponse\n

List fine-tuning jobs.

This API can be used to list all the fine-tuning jobs. It returns a list of pairs of fine_tune_id and status for all existing jobs.

Returns:

  • ListFineTunesResponse: an object that contains a list of all fine-tuning jobs and their statuses

Listing fine-tuning jobs in python / Response in json
from llmengine import FineTune\n\nresponse = FineTune.list()\nprint(response.json())\n
{\n    \"jobs\": [\n        {\n            \"fine_tune_id\": \"ft_abc123\",\n            \"status\": \"STARTED\"\n},\n        {\n            \"fine_tune_id\": \"ft_def456\",\n            \"status\": \"SUCCESS\"\n}\n    ]\n}\n
"},{"location":"api/python_client/#llmengine.fine_tuning.FineTune.cancel","title":"cancel classmethod","text":"
cancel(fine_tune_id: str) -> CancelFineTuneResponse\n

Cancel a fine-tuning job.

This API can be used to cancel an existing fine-tuning job if it's no longer required. It takes the fine_tune_id as a parameter and returns a response object with a success field confirming whether the cancellation succeeded.

Parameters:

  • fine_tune_id (`str`, required): ID of the fine-tuning job

Returns:

  • CancelFineTuneResponse: an object that contains whether the cancellation was successful

Cancelling fine-tuning job in python / Response in json
from llmengine import FineTune\n\nresponse = FineTune.cancel(fine_tune_id=\"ft_abc123\")\nprint(response.json())\n
{\n    \"success\": true\n}\n
"},{"location":"api/python_client/#llmengine.Model","title":"Model","text":"

Bases: APIEngine

Model API. This API is used to get, list, and delete models. Models include both base models built into LLM Engine, and fine-tuned models that you create through the FineTune.create() API.

See Model Zoo for the list of publicly available base models.

"},{"location":"api/python_client/#llmengine.model.Model.get","title":"get classmethod","text":"
get(model: str) -> GetLLMEndpointResponse\n

Get information about an LLM model.

This API can be used to get information about a Model's source and inference framework. For self-hosted users, it returns additional information about the number of shards, quantization, infra settings, etc. The function takes the model name as its single parameter and returns a GetLLMEndpointResponse object.

Parameters:

  • model (`str`, required): Name of the model

Returns:

  • GetLLMEndpointResponse: an object representing the LLM and its configurations

Accessing model in python / Response in json
from llmengine import Model\n\nresponse = Model.get(\"llama-7b.suffix.2023-07-18-12-00-00\")\n\nprint(response.json())\n
{\n    \"id\": null,\n    \"name\": \"llama-7b.suffix.2023-07-18-12-00-00\",\n    \"model_name\": null,\n    \"source\": \"hugging_face\",\n    \"inference_framework\": \"text_generation_inference\",\n    \"inference_framework_tag\": null,\n    \"num_shards\": null,\n    \"quantize\": null,\n    \"spec\": null\n}\n
"},{"location":"api/python_client/#llmengine.model.Model.list","title":"list classmethod","text":"
list() -> ListLLMEndpointsResponse\n

List LLM models available to call inference on.

This API can be used to list all available models, including both publicly available models and user-created fine-tuned models. It returns a list of GetLLMEndpointResponse objects for all models. The most important field is the model name.

Returns:

  • ListLLMEndpointsResponse: list of models

Listing available models in python / Response in json
from llmengine import Model\n\nresponse = Model.list()\nprint(response.json())\n
{\n    \"model_endpoints\": [\n        {\n            \"id\": null,\n            \"name\": \"llama-7b.suffix.2023-07-18-12-00-00\",\n            \"model_name\": null,\n            \"source\": \"hugging_face\",\n            \"inference_framework\": \"text_generation_inference\",\n            \"inference_framework_tag\": null,\n            \"num_shards\": null,\n            \"quantize\": null,\n            \"spec\": null\n},\n        {\n            \"id\": null,\n            \"name\": \"llama-7b\",\n            \"model_name\": null,\n            \"source\": \"hugging_face\",\n            \"inference_framework\": \"text_generation_inference\",\n            \"inference_framework_tag\": null,\n            \"num_shards\": null,\n            \"quantize\": null,\n            \"spec\": null\n},\n        {\n            \"id\": null,\n            \"name\": \"llama-13b-deepspeed-sync\",\n            \"model_name\": null,\n            \"source\": \"hugging_face\",\n            \"inference_framework\": \"deepspeed\",\n            \"inference_framework_tag\": null,\n            \"num_shards\": null,\n            \"quantize\": null,\n            \"spec\": null\n},\n        {\n            \"id\": null,\n            \"name\": \"falcon-40b\",\n            \"model_name\": null,\n            \"source\": \"hugging_face\",\n            \"inference_framework\": \"text_generation_inference\",\n            \"inference_framework_tag\": null,\n            \"num_shards\": null,\n            \"quantize\": null,\n            \"spec\": null\n}\n    ]\n}\n
"},{"location":"api/python_client/#llmengine.model.Model.delete","title":"delete classmethod","text":"
delete(model: str) -> DeleteLLMEndpointResponse\n

Deletes an LLM model.

This API can be used to delete a fine-tuned model. It takes the model name as a parameter and returns a response object with a deleted field confirming whether the deletion was successful. If called on a base model included with LLM Engine, an error will be thrown.

Parameters:

  • model (`str`, required): Name of the model

Returns:

  • response (DeleteLLMEndpointResponse): whether the model was successfully deleted

Deleting model in python / Response in json
from llmengine import Model\n\nresponse = Model.delete(\"llama-7b.suffix.2023-07-18-12-00-00\")\nprint(response.json())\n
{\n    \"deleted\": true\n}\n
"},{"location":"guides/completions/","title":"Completions","text":"

Language Models are trained to understand natural language and provide text outputs as a response to their inputs. The inputs are called prompts and the outputs are referred to as completions. LLMs take the input prompts and chunk them into smaller units called tokens to process and generate language. Tokens may include trailing spaces and even sub-words; this process is language dependent.

Scale LLM Engine provides access to open source language models (see Model Zoo) that can be used for producing completions to prompts.

"},{"location":"guides/completions/#completion-api-call","title":"Completion API call","text":"

An example API call looks as follows:

from llmengine import Completion\n\nresponse = Completion.create(\n    model=\"llama-7b\",\n    prompt=\"Hello, my name is\",\n    max_new_tokens=10,\n    temperature=0.2,\n)\n

The model is the LLM to be used (see Model Zoo). The prompt is the main input for the LLM to respond to. The max_new_tokens parameter is the maximum number of tokens to generate in the completion. The temperature is the sampling temperature to use: higher values make the output more random, while lower values make it more focused and deterministic.

See the full API reference documentation to learn more.

"},{"location":"guides/completions/#completion-api-response","title":"Completion API response","text":"

An example Completion API response looks as follows:

Response in json / Response in python
{\n  \"outputs\": [\n    {\n      \"text\": \"_______ and I am a _______\",\n      \"num_completion_tokens\": 10\n}\n  ]\n}\n
>>> print(response.outputs[0].text)\n________. Hello, my name is _\n
"},{"location":"guides/completions/#token-streaming","title":"Token streaming","text":"

The Completions API supports token streaming to reduce perceived latency for certain applications. When streaming, tokens will be sent as data-only server-sent events.

To enable token streaming, pass stream=True to either Completion.create or Completion.acreate.

An example of token streaming using the synchronous Completions API looks as follows:

Token streaming with synchronous API in python
from llmengine import Completion\n\nstream = Completion.create(\n    model=\"llama-7b\",\n    prompt=\"why is the sky blue?\",\n    max_new_tokens=5,\n    temperature=0.2,\n    stream=True,\n)\n\nfor response in stream:\n    if response.output:\n        print(response.json())\n
"},{"location":"guides/completions/#async-requests","title":"Async requests","text":"

The Python client supports asyncio for creating Completions. Use Completion.acreate instead of Completion.create to utilize async processing. The function signatures are otherwise identical.

An example of async Completions looks as follows:

Completions with asynchronous API in python
import asyncio\nfrom llmengine import Completion\n\nasync def main():\n    response = await Completion.acreate(\n        model=\"llama-7b\",\n        prompt=\"Hello, my name is\",\n        max_new_tokens=10,\n        temperature=0.2,\n    )\n    print(response.json())\n\nasyncio.run(main())\n
"},{"location":"guides/completions/#which-model-should-i-use","title":"Which model should I use?","text":"

See the Model Zoo for more information on best practices for which model to use for Completions.

"},{"location":"guides/fine_tuning/","title":"Fine-tuning","text":"

Learn how to customize your models on your data with fine-tuning.

"},{"location":"guides/fine_tuning/#introduction","title":"Introduction","text":"

Fine-tuning helps improve model performance by training on specific examples of prompts and desired responses. LLMs are initially trained on data collected from the entire internet. With fine-tuning, LLMs can be optimized to perform better in a specific domain by learning from examples for that domain. Smaller LLMs that have been fine-tuned on a specific use case often outperform larger ones that were trained more generally.

Fine-tuning allows for:

  1. Higher quality results than prompt engineering alone
  2. Cost savings through shorter prompts
  3. The ability to reach equivalent accuracy with a smaller model
  4. Lower latency at inference time
  5. The chance to show an LLM more examples than can fit in a single context window

LLM Engine's fine-tuning API lets you fine-tune various open source LLMs on your own data and then make inference calls to the resulting LLM. For more specific details, see the fine-tuning API reference.

"},{"location":"guides/fine_tuning/#producing-high-quality-data-for-fine-tuning","title":"Producing high quality data for fine-tuning","text":"

The training data for fine-tuning should consist of prompt and response pairs.

As a rule of thumb, you should expect to see linear improvements in your fine-tuned model's quality with each doubling of the dataset size. Having high-quality data is also essential to improving performance. For every linear increase in the error rate in your training data, you may encounter a roughly quadratic increase in your fine-tuned model's error rate.

High-quality data is critical to achieving improved model performance, and in several cases it will require experts to generate and prepare the data; the breadth and diversity of the data are highly important. Scale's Data Engine can help prepare such high-quality, diverse datasets; more information is available here.

"},{"location":"guides/fine_tuning/#preparing-data","title":"Preparing data","text":"

Your data must be formatted as a CSV file that includes two columns: prompt and response. A maximum of 100,000 rows of data is currently supported. At least 200 rows of data are recommended to start to see benefits from fine-tuning.

Here is an example script to create a 50-row CSV of properly formatted data for fine-tuning an airline question answering bot:

Creating a sample dataset
import csv\n# Define data\ndata = [\n    (\"What is your policy on carry-on luggage?\", \"Our policy allows each passenger to bring one piece of carry-on luggage and one personal item such as a purse or briefcase. The maximum size for carry-on luggage is 22 x 14 x 9 inches.\"),\n    (\"How can I change my flight?\", \"You can change your flight through our website or mobile app. Go to 'Manage my booking' section, enter your booking reference and last name, then follow the prompts to change your flight.\"),\n    (\"What meals are available on my flight?\", \"We offer a variety of meals depending on the flight's duration and route. These can range from snacks and light refreshments to full-course meals on long-haul flights. Specific meal options can be viewed during the booking process.\"),\n    (\"How early should I arrive at the airport before my flight?\", \"We recommend arriving at least two hours before domestic flights and three hours before international flights.\"),\n    (\"Can I select my seat in advance?\", \"Yes, you can select your seat during the booking process or afterwards via the 'Manage my booking' section on our website or mobile app.\"),\n    (\"What should I do if my luggage is lost?\", \"If your luggage is lost, please report this immediately at our 'Lost and Found' counter at the airport. We will assist you in tracking your luggage.\"),\n    (\"Do you offer special assistance for passengers with disabilities?\", \"Yes, we offer special assistance for passengers with disabilities. Please notify us of your needs at least 48 hours prior to your flight.\"),\n    (\"Can I bring my pet on the flight?\", \"Yes, we allow small pets in the cabin, and larger pets in the cargo hold. Please check our pet policy for more details.\"),\n    (\"What is your policy on flight cancellations?\", \"In case of flight cancellations, we aim to notify passengers as early as possible and offer either a refund or a rebooking on the next available flight.\"),\n    (\"Can I get a refund if I cancel my flight?\", \"Refunds depend on the type of ticket purchased. Please check our cancellation policy for details. Non-refundable tickets, however, are typically not eligible for refunds unless due to extraordinary circumstances.\"),\n    (\"How can I check-in for my flight?\", \"You can check-in for your flight either online, through our mobile app, or at the airport. Online and mobile app check-in opens 24 hours before departure and closes 90 minutes before.\"),\n    (\"Do you offer free meals on your flights?\", \"Yes, we serve free meals on all long-haul flights. For short-haul flights, we offer a complimentary drink and snack. Special meal requests should be made at least 48 hours before departure.\"),\n    (\"Can I use my electronic devices during the flight?\", \"Small electronic devices can be used throughout the flight in flight mode. Larger devices like laptops may be used above 10,000 feet.\"),\n    (\"How much baggage can I check-in?\", \"The checked baggage allowance depends on the class of travel and route. The details would be mentioned on your ticket, or you can check on our website.\"),\n    (\"How can I request for a wheelchair?\", \"To request a wheelchair or any other special assistance, please call our customer service at least 48 hours before your flight.\"),\n    (\"Do I get a discount for group bookings?\", \"Yes, we offer discounts on group bookings of 10 or more passengers. Please contact our group bookings team for more information.\"),\n    (\"Do you offer Wi-fi on your flights?\", \"Yes, we offer complimentary Wi-fi on select flights. You can check the availability during the booking process.\"),\n    (\"What is the minimum connecting time between flights?\", \"The minimum connecting time varies depending on the airport and whether your flight is international or domestic. Generally, it's recommended to allow at least 45-60 minutes for domestic connections and 60-120 minutes for international.\"),\n    (\"Do you offer duty-free shopping on international flights?\", \"Yes, we have a selection of duty-free items that you can pre-order on our website or purchase onboard on international flights.\"),\n    (\"Can I upgrade my ticket to business class?\", \"Yes, you can upgrade your ticket through the 'Manage my booking' section on our website or by contacting our customer service. The availability and costs depend on the specific flight.\"),\n    (\"Can unaccompanied minors travel on your flights?\", \"Yes, we do accommodate unaccompanied minors on our flights, with special services to ensure their safety and comfort. Please contact our customer service for more details.\"),\n    (\"What amenities do you provide in business class?\", \"In business class, you will enjoy additional legroom, reclining seats, premium meals, priority boarding and disembarkation, access to our business lounge, extra baggage allowance, and personalized service.\"),\n    (\"How much does extra baggage cost?\", \"Extra baggage costs vary based on flight route and the weight of the baggage. Please refer to our 'Extra Baggage' section on the website for specific rates.\"),\n    (\"Are there any specific rules for carrying liquids in carry-on?\", \"Yes, liquids carried in your hand luggage must be in containers of 100 ml or less and they should all fit into a single, transparent, resealable plastic bag of 20 cm x 20 cm.\"),\n    (\"What if I have a medical condition that requires special assistance during the flight?\", \"We aim to make the flight comfortable for all passengers. If you have a medical condition that may require special assistance, please contact our \u2018special services\u2019 team 48 hours before your flight.\"),\n    (\"What in-flight entertainment options are available?\", \"We offer a range of in-flight entertainment options including a selection of movies, TV shows, music, and games, available on your personal seat-back screen.\"),\n    (\"What types of payment methods do you accept?\", \"We accept credit/debit cards, PayPal, bank transfers, and various other forms of payment. The available options may vary depending on the country of departure.\"),\n    (\"How can I earn and redeem frequent flyer miles?\", \"You can earn miles for every journey you take with us or our partner airlines. These miles can be redeemed for flight tickets, upgrades, or various other benefits. To earn and redeem miles, you need to join our frequent flyer program.\"),\n    (\"Can I bring a stroller for my baby?\", \"Yes, you can bring a stroller for your baby. It can be checked in for free and will normally be given back to you at the aircraft door upon arrival.\"),\n    (\"What age does my child have to be to qualify as an unaccompanied minor?\", \"Children aged between 5 and 12 years who are traveling alone are considered unaccompanied minors. Our team provides special care for these children from departure to arrival.\"),\n    (\"What documents do I need to travel internationally?\", \"For international travel, you need a valid passport and may also require visas, depending on your destination and your country of residence. It's important to check the specific requirements before you travel.\"),\n    (\"What happens if I miss my flight?\", \"If you miss your flight, please contact our customer service immediately. Depending on the circumstances, you may be able to rebook on a later flight, but additional fees may apply.\"),\n    (\"Can I travel with my musical instrument?\", \"Yes, small musical instruments can be brought on board as your one carry-on item. Larger instruments must be transported in the cargo, or if small enough, a seat may be purchased for them.\"),\n    (\"Do you offer discounts for children or infants?\", \"Yes, children aged 2-11 traveling with an adult usually receive a discount on the fare. Infants under the age of 2 who do not occupy a seat can travel for a reduced fare or sometimes for free.\"),\n    (\"Is smoking allowed on your flights?\", \"No, all our flights are non-smoking for the comfort and safety of all passengers.\"),\n    (\"Do you have family seating?\", \"Yes, we offer the option to seat families together. You can select seats during booking or afterwards through the 'Manage my booking' section on the website.\"),\n    (\"Is there any discount for senior citizens?\", \"Some flights may offer a discount for senior citizens. Please check our website or contact customer service for accurate information.\"),\n    (\"What items are prohibited on your flights?\", \"Prohibited items include, but are not limited to, sharp objects, firearms, explosive materials, and certain chemicals. You can find a comprehensive list on our website under the 'Security Regulations' section.\"),\n    (\"Can I purchase a ticket for someone else?\", \"Yes, you can purchase a ticket for someone else. You'll need their correct name as it appears on their government-issued ID, and their correct travel dates.\"),\n    (\"What is the process for lost and found items on the plane?\", \"If you realize you forgot an item on the plane, report it as soon as possible to our lost and found counter. We will make every effort to locate and return your item.\"),\n    (\"Can I request a special meal?\", \"Yes, we offer a variety of special meals to accommodate dietary restrictions. Please request your preferred meal at least 48 hours prior to your flight.\"),\n    (\"Is there a weight limit for checked baggage?\", \"Yes, luggage weight limits depend on your ticket class and route. You can find the details on your ticket or by visiting our website.\"),\n    (\"Can I bring my sports equipment?\", \"Yes, certain types of sports equipment can be carried either as or in addition to your permitted baggage. Some equipment may require additional fees. It's best to check our policy on our website or contact us directly.\"),\n    (\"Do I need a visa to travel to certain countries?\", \"Yes, visa requirements depend on the country you are visiting and your nationality. We advise checking with the relevant embassy or consulate prior to travel.\"),\n    (\"How can I add extra baggage to my booking?\", \"You can add extra baggage to your booking through the 'Manage my booking' section on our website or by contacting our customer services.\"),\n    (\"Can I check-in at the airport?\", \"Yes, you can choose to check-in at the airport. However, we also offer online and mobile check-in, which may save you time.\"),\n    (\"How do I know if my flight is delayed or cancelled?\", \"In case of any changes to your flight, we will attempt to notify all passengers using the contact information given at the time of booking. You can also check your flight status on our website.\"),\n    (\"What is your policy on pregnant passengers?\", \"Pregnant passengers can travel up to the end of the 36th week for single pregnancies, and the end of the 32nd week for multiple pregnancies. We recommend consulting your doctor before any air travel.\"),\n    (\"Can children travel alone?\", \"Yes, children age 5 to 12 can travel alone as unaccompanied minors. We provide special care for these children. Please contact our customer service for more information.\"),\n    (\"How can I pay for my booking?\", \"You can pay for your booking using a variety of methods including credit and debit cards, PayPal, or bank transfers. The options may vary depending on the country of departure.\"),\n]\n\n# Write data to a CSV file\nwith open('customer_service_data.csv', 'w', newline='') as file:\n    writer = csv.writer(file)\n    writer.writerow([\"prompt\", \"response\"])\n    writer.writerows(data)\n
"},{"location":"guides/fine_tuning/#making-your-data-accessible-to-llm-engine","title":"Making your data accessible to LLM Engine","text":"

Currently, data needs to be uploaded to a publicly accessible web URL so that it can be read for fine-tuning. Publicly accessible HTTP, HTTPS, and S3 URLs are currently supported. Support for privately sharing data with the LLM Engine API is coming shortly. For quick iteration, you can use tools like Pastebin or GitHub Gists to host your CSV files publicly. We created an example GitHub Gist you can see here. To use the gist, use the URL given when you click the \u201cRaw\u201d button.
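
One possible approach, sketched below with boto3, is to upload the CSV to S3 and generate a presigned HTTPS URL; the bucket and key names are placeholders, and note that a presigned URL is publicly readable until it expires, so treat it accordingly.

Uploading a dataset to S3 in python
import boto3\n\n# Upload the CSV and generate a presigned HTTPS URL that LLM Engine can read.\n# The bucket and key names here are placeholders.\ns3 = boto3.client(\"s3\")\ns3.upload_file(\n    \"customer_service_data.csv\",\n    \"my-bucket\",\n    \"fine-tune/customer_service_data.csv\",\n)\nurl = s3.generate_presigned_url(\n    \"get_object\",\n    Params={\"Bucket\": \"my-bucket\", \"Key\": \"fine-tune/customer_service_data.csv\"},\n    ExpiresIn=3600,\n)\nprint(url)  # pass this URL as training_file to FineTune.create\n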

"},{"location":"guides/fine_tuning/#launching-the-fine-tune","title":"Launching the fine-tune","text":"

Once you have uploaded your data, you can use the LLM Engine API to launch a fine-tune. You will need to specify which base model to fine-tune, the locations of the training file and optional validation data file, an optional set of hyperparameters to customize the fine-tuning behavior, and an optional suffix to append to the name of the fine-tune.

If you specify a suffix, the fine-tune will be named model:suffix:<timestamp>. If you do not, the fine-tune will be named model:<timestamp>. The timestamp will be the time the fine-tune was launched.

Hyper-parameters for fine-tune:

  • `lr`: Peak learning rate used during fine-tuning. It decays with a cosine schedule afterward. (Default: 2e-5)
  • `warmup_ratio`: Ratio of training steps used for learning rate warmup. (Default: 0.03)
  • `epochs`: Number of fine-tuning epochs. (Default: 5)
  • `weight_decay`: Regularization penalty applied to learned weights. (Default: 0.001)
  • `max_seq_length`: Maximum number of tokens per sequence in the dataset. (Default: 1024)

Create a fine-tune in python
from llmengine import FineTune\n\nresponse = FineTune.create(\n    model=\"llama-7b\",\n    training_file=\"s3://my-bucket/path/to/training-file.csv\",\n)\n\nprint(response.json())\n
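
As a sketch of the naming behavior described above, the same call with a validation file, custom hyperparameters, and a suffix might look like the following; the file URLs and the airlines suffix are placeholders. With this suffix, the resulting model would be named along the lines of llama-7b:airlines:<timestamp>.

Create a fine-tune with a suffix in python
from llmengine import FineTune\n\n# Placeholder URLs and suffix; hyperparameter values are passed as strings\n# to match the Dict[str, str] signature of FineTune.create.\nresponse = FineTune.create(\n    model=\"llama-7b\",\n    training_file=\"s3://my-bucket/path/to/training-file.csv\",\n    validation_file=\"s3://my-bucket/path/to/validation-file.csv\",\n    hyperparameters={\"lr\": \"1e-5\", \"epochs\": \"3\"},\n    suffix=\"airlines\",\n)\n\nprint(response.json())\n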

See the Model Zoo to see which models have fine-tuning support.

Once the fine-tune is launched, you can also get the status of your fine-tune.

"},{"location":"guides/fine_tuning/#making-inference-calls-to-your-fine-tune","title":"Making inference calls to your fine-tune","text":"

Once your fine-tune is finished, you will be able to start making inference requests to the model. You can use the fine_tuned_model returned from your FineTune.get API call to reference your fine-tuned model in the Completions API; alternatively, you can list available LLMs with Model.list to find the name of your fine-tuned model. Note that you must wait until your fine-tune is complete before you can plug it into the Completions API; you can check its status with FineTune.get. See the Completion API for more details.

Inference with a fine-tuned model in python
from llmengine import Completion\n\nresponse = Completion.create(\n    model=\"llama-7b:airlines:2023-07-17-08-30-45\",\n    prompt=\"Do you offer in-flight Wi-fi?\",\n    max_new_tokens=100,\n    temperature=0.2,\n)\nprint(response.json())\n
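
A minimal sketch of the end-to-end flow is shown below; the ft_abc123 job ID and the hard-coded model name are placeholders, and FAILURE and SUCCESS are assumed to be the terminal statuses reported by FineTune.get.

Waiting for a fine-tune before inference in python
import time\n\nfrom llmengine import Completion, FineTune\n\n# Poll the (placeholder) fine-tune job until it reaches a terminal status.\nwhile True:\n    job = FineTune.get(fine_tune_id=\"ft_abc123\")\n    if job.status in (\"SUCCESS\", \"FAILURE\"):\n        break\n    time.sleep(60)\n\n# Only send completions once training has succeeded; the model name below\n# is a placeholder for the name assigned to your fine-tune.\nif job.status == \"SUCCESS\":\n    response = Completion.create(\n        model=\"llama-7b:airlines:2023-07-17-08-30-45\",\n        prompt=\"Do you offer in-flight Wi-fi?\",\n        max_new_tokens=100,\n        temperature=0.2,\n    )\n    print(response.json())\n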
"},{"location":"guides/rate_limits/","title":"Overview","text":""},{"location":"guides/rate_limits/#what-are-rate-limits","title":"What are rate limits?","text":"

A rate limit is a restriction that an API imposes on the number of times a user or client can access the server within a specified period of time.

"},{"location":"guides/rate_limits/#how-do-i-know-if-i-am-rate-limited","title":"How do I know if I am rate limited?","text":"

Per standard HTTP practices, your request will receive a response with an HTTP status code of 429, Too Many Requests.

"},{"location":"guides/rate_limits/#what-are-the-rate-limits-for-our-api","title":"What are the rate limits for our API?","text":"

The LLM Engine API is currently in preview mode, and we therefore do not have any advertised rate limits. As the API moves towards a production release, we will update this section with specific rate limits. For now, the API will return HTTP 429 on an as-needed basis.

"},{"location":"guides/rate_limits/#error-mitigation","title":"Error mitigation","text":""},{"location":"guides/rate_limits/#retrying-with-exponential-backoff","title":"Retrying with exponential backoff","text":"

One easy way to avoid rate limit errors is to automatically retry requests with a random exponential backoff. Retrying with exponential backoff means performing a short sleep when a rate limit error is hit, then retrying the unsuccessful request. If the request is still unsuccessful, the sleep length is increased and the process is repeated. This continues until the request is successful or until a maximum number of retries is reached. This approach has many benefits:

  • Automatic retries mean you can recover from rate limit errors without crashes or missing data
  • Exponential backoff means that your first retries can be tried quickly, while still benefiting from longer delays if your first few retries fail
  • Adding random jitter to the delay helps prevent retries from all hitting at the same time

Below are a few example solutions for Python that use exponential backoff.

"},{"location":"guides/rate_limits/#example-1-using-the-tenacity-library","title":"Example #1: Using the tenacity library","text":"

Tenacity is an Apache 2.0 licensed general-purpose retrying library, written in Python, to simplify the task of adding retry behavior to just about anything. To add exponential backoff to your requests, you can use the tenacity.retry decorator. The below example uses the tenacity.wait_random_exponential function to add random exponential backoff to a request.

Exponential backoff in python
import llmengine\nfrom tenacity import (\n    retry,\n    stop_after_attempt,\n    wait_random_exponential,\n)  # for exponential backoff\n\n\n@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))\ndef completion_with_backoff(**kwargs):\n    return llmengine.Completion.create(**kwargs)\n\ncompletion_with_backoff(model=\"llama-7b\", prompt=\"Why is the sky blue?\")\n
"},{"location":"guides/rate_limits/#example-2-using-the-backoff-library","title":"Example #2: Using the backoff library","text":"

Another python library that provides function decorators for backoff and retry is backoff:

Decorators for backoff and retry in python
import llmengine\nimport backoff\n\n\n@backoff.on_exception(backoff.expo, llmengine.error.RateLimitError)\ndef completions_with_backoff(**kwargs):\n    return llmengine.Completion.create(**kwargs)\n\ncompletions_with_backoff(model=\"llama-7b\", prompt=\"Why is the sky blue?\")\n
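
For completeness, here is a minimal hand-rolled sketch of the same pattern with no third-party library, assuming (as in the backoff example above) that llmengine.error.RateLimitError is the exception to catch:

Manual exponential backoff in python
import random\nimport time\n\nimport llmengine\n\n\ndef completions_with_manual_backoff(max_retries=6, **kwargs):\n    delay = 1.0\n    for attempt in range(max_retries):\n        try:\n            return llmengine.Completion.create(**kwargs)\n        except llmengine.error.RateLimitError:\n            if attempt == max_retries - 1:\n                raise\n            # Sleep for an exponentially growing delay with random jitter.\n            time.sleep(delay * (1 + random.random()))\n            delay *= 2\n\n\ncompletions_with_manual_backoff(model=\"llama-7b\", prompt=\"Why is the sky blue?\")\n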
"},{"location":"guides/token_streaming/","title":"Token streaming","text":"

The Completions APIs support a stream boolean parameter that, when True, will return a streamed response of token-by-token server-sent events (SSEs) rather than waiting to receive the full response when model generation has finished. This decreases the latency before you start receiving a response.

The response will consist of SSEs of the form {\"token\": dict, \"generated_text\": str | null, \"details\": dict | null}, where the dictionary for each token will contain log probability information in addition to the generated string; the generated_text field will be null for all but the last SSE, for which it will contain the full generated response.
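
Using the Python client shown earlier, the streamed chunks can be concatenated client-side to reconstruct the full completion. A minimal sketch:

Reassembling a streamed completion in python
from llmengine import Completion\n\nstream = Completion.create(\n    model=\"llama-7b\",\n    prompt=\"why is the sky blue?\",\n    max_new_tokens=5,\n    temperature=0.2,\n    stream=True,\n)\n\n# Accumulate the token-by-token chunks into the full generated text.\ngenerated = \"\"\nfor response in stream:\n    if response.output:\n        generated += response.output.text\nprint(generated)\n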

"}]} \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz index 3b09734abc586c4cd514823baef81867ef04dc9b..e73c22d16bade771ae108e933e77502fc2e98f3e 100644 GIT binary patch delta 12 Tcmb=gXOr*d;JC*-k*yK{7)k@W delta 12 Tcmb=gXOr*d;OJtS$W{pe7J36)