diff --git a/api/data_types/index.html b/api/data_types/index.html index 9fc8be47f..ad61a62b8 100644 --- a/api/data_types/index.html +++ b/api/data_types/index.html @@ -575,6 +575,54 @@ + + +
  • + + CreateFineTuneRequest + + + +
  • @@ -662,6 +710,101 @@ +
  • + +
  • + + GetLLMEndpointResponse + + + + +
  • + +
  • + + ListLLMEndpointsResponse + + + + +
  • + +
  • + + DeleteLLMEndpointResponse + + + +
  • @@ -832,6 +975,54 @@ + + +
  • + + CreateFineTuneRequest + + + +
  • @@ -919,6 +1110,101 @@ +
  • + +
  • + + GetLLMEndpointResponse + + + + +
  • + +
  • + + ListLLMEndpointsResponse + + + + +
  • + +
  • + + DeleteLLMEndpointResponse + + + +
  • @@ -944,21 +1230,20 @@

    🐍 Python Client Data Type Reference

    - CompletionOutput + CompletionOutput

    -

    - Bases: BaseModel

    +

    + Bases: BaseModel

    Represents the output of a completion request to a model.

    -
    @@ -974,7 +1259,7 @@

    - text + text @@ -997,7 +1282,7 @@

    - num_completion_tokens + num_completion_tokens @@ -1030,16 +1315,15 @@

    - CompletionStreamOutput + CompletionStreamOutput

    -

    - Bases: BaseModel

    - +

    + Bases: BaseModel

    @@ -1058,7 +1342,7 @@

    - text + text @@ -1081,7 +1365,7 @@

    - finished + finished @@ -1104,7 +1388,7 @@

    - num_completion_tokens + num_completion_tokens @@ -1138,21 +1422,20 @@

    - CompletionSyncV1Response + CompletionSyncV1Response

    -

    - Bases: BaseModel

    +

    + Bases: BaseModel

    Response object for a synchronous prompt completion.

    -
    @@ -1168,7 +1451,7 @@

    - output + output @@ -1191,7 +1474,7 @@

    - request_id + request_id @@ -1224,21 +1507,20 @@

    - CompletionStreamV1Response + CompletionStreamV1Response

    -

    - Bases: BaseModel

    +

    + Bases: BaseModel

    Response object for a stream prompt completion task.

    -
    @@ -1254,7 +1536,7 @@

    - output + output @@ -1278,7 +1560,7 @@

    - request_id + request_id @@ -1310,20 +1592,19 @@

    - CreateFineTuneResponse +

    + CreateFineTuneRequest -

    +

    -

    - Bases: BaseModel

    +

    + Bases: BaseModel

    -

    Response object for creating a FineTune.

    - +

    Request object for creating a FineTune.

    @@ -1340,8 +1621,8 @@

    -

    - fine_tune_id +

    + model @@ -1349,63 +1630,238 @@

    instance-attribute -

    -
    fine_tune_id: str = Field(Ellipsis, description='ID of the created fine-tuning job.')
    +

    +
    model: str = Field(
    +    ...,
    +    description="Identifier of base model to train from.",
    +)
     
    -

    The ID of the FineTune.

    +

    Identifier of base model to train from.

    +
    +

    + training_file -

    + + + class-attribute + instance-attribute + +

    +
    training_file: str = Field(
    +    ...,
    +    description="Path to file of training dataset. Dataset must be a csv with columns 'prompt' and 'response'.",
    +)
    +
    + +
    + +

    Path to file of training dataset. Dataset must be a csv with columns 'prompt' and 'response'.

    -
    - - - -

    - GetFineTuneResponse - +
    -

    -
    -

    - Bases: BaseModel

    +

    + validation_file -

    Response object for retrieving a FineTune.

    - + + class-attribute + instance-attribute + +

    +
    validation_file: Optional[str] = Field(
    +    default=None,
    +    description="Path to file of validation dataset. Has the same format as training_file. If not provided, we will generate a split from the training dataset.",
    +)
    +
    +
    +

    Path to file of validation dataset. Has the same format as training_file. If not provided, we will generate a split from the training dataset.

    +
    -
    - - +
    +
    +

    + hyperparameters -
    + + + class-attribute + instance-attribute + + +

    +
    hyperparameters: Optional[Dict[str, Any]] = Field(
    +    default=None,
    +    description="Hyperparameters to pass in to training job.",
    +)
    +
    + +
    + +

    Hyperparameters to pass in to training job.

    +
    + +
    + +
    + + + +

    + suffix + + + + class-attribute + instance-attribute + + +

    +
    suffix: Optional[str] = Field(
    +    default=None,
    +    description="Optional user-provided identifier suffix for the fine-tuned model.",
    +)
    +
    + +
    + +

    Optional user-provided identifier suffix for the fine-tuned model.
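    These request fields line up one-to-one with the keyword arguments of FineTune.create; a brief sketch of how they are typically supplied (the file path and suffix below are illustrative, and the mapping comment is an assumption about the client internals):

    from llmengine import FineTune

    # Each keyword argument maps onto the corresponding CreateFineTuneRequest field.
    response = FineTune.create(
        model="llama-7b",
        training_file="s3://my-bucket/path/to/training-file.csv",
        validation_file=None,      # optional; a split of the training set is generated if omitted
        hyperparameters=None,      # optional hyperparameters for the training job
        suffix="my-experiment",    # optional suffix for the fine-tuned model's name
    )
    print(response.json())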

    +
    + +
    + + + + + +
    + +
    + +
    + +
    + + + +

    + CreateFineTuneResponse + + +

    + + +
    +

    + Bases: BaseModel

    + + +

    Response object for creating a FineTune.

    + + + + +
    + + + + + + + +
    + + + +

    + fine_tune_id + + + + class-attribute + instance-attribute + + +

    +
    fine_tune_id: str = Field(
    +    ..., description="ID of the created fine-tuning job."
    +)
    +
    + +
    + +

    The ID of the FineTune.

    +
    + +
    + + + + + +
    + +
    + +
    + +
    + + + +

    + GetFineTuneResponse + + +

    + + +
    +

    + Bases: BaseModel

    + + +

    Response object for retrieving a FineTune.

    + + + + +
    + + + + + + + +

    - fine_tune_id + fine_tune_id @@ -1414,7 +1870,9 @@

    -
    fine_tune_id: str = Field(Ellipsis, description='ID of the requested job.')
    +
    fine_tune_id: str = Field(
    +    ..., description="ID of the requested job."
    +)
     
    @@ -1429,7 +1887,7 @@

    - status + status @@ -1438,7 +1896,9 @@

    -
    status: BatchJobStatus = Field(Ellipsis, description='Status of the requested job.')
    +
    status: BatchJobStatus = Field(
    +    ..., description="Status of the requested job."
    +)
     
    @@ -1463,21 +1923,20 @@

    - ListFineTunesResponse + ListFineTunesResponse

    -

    - Bases: BaseModel

    +

    + Bases: BaseModel

    Response object for listing FineTunes.

    -
    @@ -1493,7 +1952,7 @@

    - jobs + jobs @@ -1502,7 +1961,10 @@

    -
    jobs: List[GetFineTuneResponse] = Field(Ellipsis, description='List of fine-tuning jobs and their statuses.')
    +
    jobs: List[GetFineTuneResponse] = Field(
    +    ...,
    +    description="List of fine-tuning jobs and their statuses.",
    +)
     
    @@ -1527,21 +1989,20 @@

    - CancelFineTuneResponse + CancelFineTuneResponse

    -

    - Bases: BaseModel

    +

    + Bases: BaseModel

    Response object for cancelling a FineTune.

    -
    @@ -1557,7 +2018,7 @@

    - success + success @@ -1566,7 +2027,9 @@

    -
    success: bool = Field(Ellipsis, description='Whether cancellation was successful.')
    +
    success: bool = Field(
    +    ..., description="Whether cancellation was successful."
    +)
     
    @@ -1580,6 +2043,329 @@

    + + + +

    + GetLLMEndpointResponse + + +

    + + +
    +

    + Bases: BaseModel

    + + +

    Response object for retrieving a Model.

    + + + + +
    + + + + + + + +
    + + + +

    + id + + + + class-attribute + instance-attribute + + +

    +
    id: str = Field(
    +    description="The autogenerated ID of the Launch endpoint."
    +)
    +
    + +
    + +

    The autogenerated ID of the Launch endpoint.

    +
    + +
    + +
    + + + +

    + name + + + + class-attribute + instance-attribute + + +

    +
    name: str = Field(
    +    description="The name of the Launch endpoint."
    +)
    +
    + +
    + +

    The name of the Launch endpoint.

    +
    + +
    + +
    + + + +

    + model_name + + + + class-attribute + instance-attribute + + +

    +
    model_name: str = Field(
    +    description="The name of the model."
    +)
    +
    + +
    + +

    The name of the model.

    +
    + +
    + +
    + + + +

    + source + + + + class-attribute + instance-attribute + + +

    +
    source: LLMSource = Field(
    +    description="The source of the model."
    +)
    +
    + +
    + +

    The source of the model.

    +
    + +
    + +
    + + + +

    + inference_framework + + + + class-attribute + instance-attribute + + +

    +
    inference_framework: LLMInferenceFramework = Field(
    +    description="The inference framework used by the endpoint."
    +)
    +
    + +
    + +

    The inference framework used by the endpoint.

    +
    + +
    + +
    + + + +

    + num_shards + + + + class-attribute + instance-attribute + + +

    +
    num_shards: int = Field(description="The number of shards.")
    +
    + +
    + +

    The number of shards.

    +
    + +
    + + + + + +
    + +
    + +
    + +
    + + + +

    + ListLLMEndpointsResponse + + +

    + + +
    +

    + Bases: BaseModel

    + + +

    Response object for listing Models.

    + + + + +
    + + + + + + + +
    + + + +

    + model_endpoints + + + + class-attribute + instance-attribute + + +

    +
    model_endpoints: List[GetLLMEndpointResponse] = Field(
    +    ..., description="The list of LLM endpoints."
    +)
    +
    + +
    + +

    A list of Models, represented as GetLLMEndpointResponses.

    +
    + +
    + + + + + +
    + +
    + +
    + +
    + + + +

    + DeleteLLMEndpointResponse + + +

    + + +
    +

    + Bases: BaseModel

    + + +

    Response object for deleting a Model.

    + + + + +
    + + + + + + + +
    + + + +

    + deleted + + + + class-attribute + instance-attribute + + +

    +
    deleted: bool = Field(
    +    ..., description="Whether deletion was successful."
    +)
    +
    + +
    + +

    Whether the deletion succeeded.

    +
    + +
    + + + + +
    diff --git a/api/error_handling/index.html b/api/error_handling/index.html index 11175dffc..c8caaf612 100644 --- a/api/error_handling/index.html +++ b/api/error_handling/index.html @@ -614,7 +614,7 @@

    Error handling - BadRequestError + BadRequestError

    @@ -622,15 +622,14 @@

    -

    - Bases: Exception

    +

    + Bases: Exception

    Corresponds to HTTP 400. Indicates that the request had inputs that were invalid. The user should not attempt to retry the request without changing the inputs.

    -
    @@ -656,7 +655,7 @@

    - UnauthorizedError + UnauthorizedError

    @@ -664,14 +663,13 @@

    -

    - Bases: Exception

    +

    + Bases: Exception

    Corresponds to HTTP 401. This means that no valid API key was provided.

    -
    @@ -697,7 +695,7 @@

    - NotFoundError + NotFoundError

    @@ -705,8 +703,8 @@

    -

    - Bases: Exception

    +

    + Bases: Exception

    Corresponds to HTTP 404. This means that the resource (e.g. a Model, FineTune, etc.) could not be found. @@ -715,7 +713,6 @@

    the user does not have access to.

    -
    @@ -741,7 +738,7 @@

    - RateLimitExceededError + RateLimitExceededError

    @@ -749,14 +746,13 @@

    -

    - Bases: Exception

    +

    + Bases: Exception

    Corresponds to HTTP 429. Too many requests hit the API too quickly. We recommend an exponential backoff for retries.
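    Since exponential backoff is the recommended retry strategy here, a minimal sketch follows (the model name and retry budget are illustrative; the import path mirrors the llmengine.errors reference locations above):

    import time

    from llmengine import Completion
    from llmengine.errors import RateLimitExceededError

    # Retry a completion with exponential backoff when HTTP 429 is returned.
    def create_with_backoff(prompt: str, max_retries: int = 5):
        delay = 1.0
        for attempt in range(max_retries):
            try:
                return Completion.create(model="llama-7b", prompt=prompt)
            except RateLimitExceededError:
                if attempt == max_retries - 1:
                    raise
                time.sleep(delay)
                delay *= 2  # double the wait after each rate-limited attempt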

    -
    @@ -782,7 +778,7 @@

    - ServerError + ServerError

    @@ -790,14 +786,13 @@

    -

    - Bases: Exception

    +

    + Bases: Exception

    Corresponds to HTTP 5xx errors on the server.

    -
    diff --git a/api/python_client/index.html b/api/python_client/index.html index 3832ac7ea..37de9dc08 100644 --- a/api/python_client/index.html +++ b/api/python_client/index.html @@ -491,15 +491,15 @@
  • - - list() + + get()
  • - - retrieve() + + list()
  • @@ -514,6 +514,40 @@ + + +
  • + + Model + + + +
  • @@ -628,15 +662,15 @@
  • - - list() + + get()
  • - - retrieve() + + list()
  • @@ -651,6 +685,40 @@ + + +
  • + + Model + + + +
  • @@ -676,15 +744,15 @@

    🐍 Python Client API Reference - Completion + Completion

    -

    - Bases: APIEngine

    +

    + Bases: APIEngine

    Completion API. This API is used to generate text completions.

    @@ -697,7 +765,6 @@

    stream token responses or not.
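    A brief sketch of both modes, mirroring the create examples later in this reference:

    from llmengine import Completion

    # Synchronous completion: returns a CompletionSyncV1Response.
    response = Completion.create(
        model="llama-7b",
        prompt="Hello, my name is",
        max_new_tokens=10,
        temperature=0.2,
    )
    print(response.json())

    # Streaming completion: returns an Iterator[CompletionStreamV1Response].
    stream = Completion.create(
        model="llama-7b",
        prompt="why is the sky blue?",
        max_new_tokens=5,
        temperature=0.2,
        stream=True,
    )
    for chunk in stream:
        if chunk.output:
            print(chunk.output.text, end="")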

    -
    @@ -710,12 +777,13 @@

    +

    - create + create @@ -723,13 +791,25 @@

    -
    create(model: str, prompt: str, max_new_tokens: int = 20, temperature: float = 0.2, timeout: int = 10, stream: bool = False) -> Union[CompletionSyncV1Response, Iterator[CompletionStreamV1Response]]
    +
    create(
    +    model: str,
    +    prompt: str,
    +    max_new_tokens: int = 20,
    +    temperature: float = 0.2,
    +    timeout: int = 10,
    +    stream: bool = False,
    +) -> Union[
    +    CompletionSyncV1Response,
    +    Iterator[CompletionStreamV1Response],
    +]
     

    Creates a completion for the provided prompt and parameters synchronously.

    + +

    Parameters:

    @@ -833,6 +913,8 @@

    + +

    Returns:

    @@ -913,12 +995,13 @@

    +

    - acreate + acreate @@ -927,13 +1010,25 @@

    -
    acreate(model: str, prompt: str, max_new_tokens: int = 20, temperature: float = 0.2, timeout: int = 10, stream: bool = False) -> Union[CompletionSyncV1Response, AsyncIterable[CompletionStreamV1Response]]
    +
    acreate(
    +    model: str,
    +    prompt: str,
    +    max_new_tokens: int = 20,
    +    temperature: float = 0.2,
    +    timeout: int = 10,
    +    stream: bool = False,
    +) -> Union[
    +    CompletionSyncV1Response,
    +    AsyncIterable[CompletionStreamV1Response],
    +]
     

    Creates a completion for the provided prompt and parameters asynchronously (with asyncio).
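    A sketch of the asyncio flow, mirroring the async example later in this reference:

    import asyncio

    from llmengine import Completion

    async def main():
        # acreate mirrors create, but must be awaited inside an event loop.
        response = await Completion.acreate(
            model="llama-7b",
            prompt="Hello, my name is",
            max_new_tokens=10,
            temperature=0.2,
        )
        print(response.json())

    asyncio.run(main())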

    + +

    Parameters:

    @@ -1037,6 +1132,8 @@

    + +

    Returns:

    @@ -1137,21 +1234,21 @@

    - FineTune + FineTune

    -

    - Bases: APIEngine

    +

    + Bases: APIEngine

    FineTune API. This API is used to fine-tune models.

    Fine-tuning is a process where the LLM is further trained on a task-specific dataset, allowing the model to adjust its parameters to better align with the task at hand. Fine-tuning involves the supervised training phase, where prompt/response pairs are provided to optimize the performance of the LLM.

    -

    Scale llm-engine provides APIs to create fine-tunes on a base-model with training & validation data-sets. APIs are also provided to list, cancel and retrieve fine-tuning jobs.

    - +

    Scale LLMEngine provides APIs to create fine-tunes on a base model with training & validation datasets. APIs are also provided to get, list, and cancel fine-tuning jobs.

    +

    Creating a fine-tune will end with the creation of a Model, which you can view using Model.get(model_name) or delete using Model.delete(model_name).
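    A sketch of that lifecycle (the training file path and the resulting model name are illustrative):

    from llmengine import FineTune, Model

    # Launch the fine-tune; a successful job produces a new Model.
    response = FineTune.create(
        model="llama-7b",
        training_file="s3://my-bucket/path/to/training-file.csv",
    )
    print(response.fine_tune_id)

    # Once the job succeeds, view or delete the resulting model by name.
    print(Model.get("llama-7b.suffix.2023-07-18-12-00-00").json())
    Model.delete("llama-7b.suffix.2023-07-18-12-00-00")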

    @@ -1166,12 +1263,13 @@

    +

    - create + create @@ -1179,13 +1277,21 @@

    -
    create(model: str, training_file: str, validation_file: Optional[str] = None, hyperparameters: Optional[Dict[str, str]] = None, suffix: Optional[str] = None) -> CreateFineTuneResponse
    +
    create(
    +    model: str,
    +    training_file: str,
    +    validation_file: Optional[str] = None,
    +    hyperparameters: Optional[Dict[str, str]] = None,
    +    suffix: Optional[str] = None,
    +) -> CreateFineTuneResponse
     

    Creates a job that fine-tunes a specified model from a given dataset.

    + +

    Parameters:

    @@ -1270,6 +1376,8 @@

    + +

    Returns:

    @@ -1339,12 +1447,111 @@

    + +
    + + + +

    + get + + + + classmethod + + +

    +
    get(fine_tune_id: str) -> GetFineTuneResponse
    +
    + +
    + +

    Get status of a fine-tuning job

    + + + +

    Parameters:

    +

    + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    fine_tune_id + `str` + +
    +

    ID of the fine-tuning job

    +
    +
    + required +
    + + + +

    Returns:

    + + + + + + + + + + + + + +
    Name TypeDescription
    GetFineTuneResponse + GetFineTuneResponse + +
    +

    an object that contains the ID and status of the requested job

    +
    +
    + +
    + Example +
    from llmengine import FineTune
    +
    +response = FineTune.get(
    +    fine_tune_id="ft_abc123",
    +)
    +
    +print(response.json())
    +
    +
    +
    + JSON Response +
    {
    +    "fine_tune_id": "ft_abc123",
    +    "status": "RUNNING"
    +}
    +
    +
    +
    + +
    + +

    - list + list @@ -1359,6 +1566,8 @@

    List fine-tuning jobs

    + +

    Returns:

    @@ -1391,41 +1600,46 @@

    JSON Response -
    [
    -    {
    -        "fine_tune_id": "ft_abc123",
    -        "status": "RUNNING"
    -    },
    -    {
    -        "fine_tune_id": "ft_def456",
    -        "status": "SUCCESS"
    -    }
    -]
    +  
    {
    +    "jobs": [
    +        {
    +            "fine_tune_id": "ft_abc123",
    +            "status": "RUNNING"
    +        },
    +        {
    +            "fine_tune_id": "ft_def456",
    +            "status": "SUCCESS"
    +        }
    +    ]
    +}
     
    +
    -

    - retrieve +

    + cancel classmethod -

    -
    retrieve(fine_tune_id: str) -> GetFineTuneResponse
    +

    +
    cancel(fine_tune_id: str) -> CancelFineTuneResponse
     
    -

    Get status of a fine-tuning job

    +

    Cancel a fine-tuning job

    + +

    Parameters:

    @@ -1455,6 +1669,8 @@

    + +

    Returns:

    @@ -1465,12 +1681,12 @@

    - @@ -1481,44 +1697,83 @@

    Example
    from llmengine import FineTune
     
    -response = FineTune.retrieve(
    -    fine_tune_id="ft_abc123",
    -)
    -
    -print(response.json())
    +response = FineTune.cancel(fine_tune_id="ft_abc123")
    +print(response.json())
     
    JSON Response
    {
    -    "fine_tune_id": "ft_abc123",
    -    "status": "RUNNING"
    -}
    +    "success": true
    +}
     
    + + + + + + + + +
    + + + +

    + Model + + +

    + + +
    +

    + Bases: APIEngine

    + + +

    Model API. This API is used to get, list, delete, and (in the self-hosted case) create models. When using Scale Spellbook, create models using FineTune.create().

    +

    See Model Zoo for the list of publicly available models.

    + + + + +
    + + + + + + + + + +
    -

    - cancel +

    + get classmethod -

    -
    cancel(fine_tune_id: str) -> CancelFineTuneResponse
    +

    +
    get(model_name: str) -> GetLLMEndpointResponse
     
    -

    Cancel a fine-tuning job

    +

    Get information about an LLM model endpoint.

    + +

    Parameters:

    GetFineTuneResponse - GetFineTuneResponse +CancelFineTuneResponse + CancelFineTuneResponse
    -

    an object that contains the ID and status of the requested job

    +

    an object that contains whether the cancellation was successful

    @@ -1532,13 +1787,13 @@

    - +
    fine_tune_idmodel_name `str`
    -

    ID of the fine-tuning job

    +

    Name of the model

    @@ -1548,6 +1803,8 @@

    + +

    Returns:

    @@ -1558,12 +1815,12 @@

    - @@ -1572,16 +1829,213 @@

    Example -
    from llmengine import FineTune
    +  
    from llmengine import Model
     
    -response = FineTune.cancel(fine_tune_id="ft_abc123")
    +response = Model.get("llama-7b.suffix.2023-07-18-12-00-00")
    +
    +print(response.json())
    +
    +
    +
    + JSON Response +
    {
    +    "id": "end_abc123",
    +    "name": "llama-7b.suffix.2023-07-18-12-00-00",
    +    "model_name": "llama-7b",
    +    "source": "hugging_face",
    +    "inference_framework": "text_generation_inference",
    +    "num_shards": 4
    +}
    +
    +
    + + + + + +
    + + + +

    + list + + + + classmethod + + +

    +
    list() -> ListLLMEndpointsResponse
    +
    + +
    + +

    List LLM model endpoints available to call inference on. This includes publicly available endpoints as well as your fine-tuned model endpoints.

    + + + +

    Returns:

    +

    CancelFineTuneResponse - CancelFineTuneResponse +GetLLMEndpointResponse + GetLLMEndpointResponse
    -

    an object that contains whether the cancellation was successful

    +

    object representing the LLM endpoint and configurations

    + + + + + + + + + + + + +
    Name TypeDescription
    ListLLMEndpointsResponse + ListLLMEndpointsResponse + +
    +

    list of model endpoints

    +
    +
    + +
    + Example +
    from llmengine import Model
    +
    +response = Model.list()
    +print(response.json())
    +
    +
    +
    + JSON Response +
    {
    +    "model_endpoints": [
    +        {
    +            "id": "end_abc123",
    +            "name": "llama-7b",
    +            "model_name": "llama-7b",
    +            "source": "hugging_face",
    +            "inference_framework": "text_generation_inference",
    +            "num_shards": 4
    +        },
    +        {
    +            "id": "end_def456",
    +            "name": "llama-13b-deepspeed-sync",
    +            "model_name": "llama-13b-deepspeed-sync",
    +            "source": "hugging_face",
    +            "inference_framework": "deepspeed",
    +            "num_shards": 4
    +        },
    +        {
    +            "id": "end_ghi789",
    +            "name": "falcon-40b",
    +            "model_name": "falcon-40b",
    +            "source": "hugging_face",
    +            "inference_framework": "text_generation_inference",
    +            "num_shards": 4
    +        },
    +        {
    +            "id": "end_jkl012",
    +            "name": "mpt-7b-instruct",
    +            "model_name": "mpt-7b-instruct",
    +            "source": "hugging_face",
    +            "inference_framework": "text_generation_inference",
    +            "num_shards": 4
    +        }
    +    ]
    +}
    +
    +
    +

    + +

    + + +
    + + + +

    + delete + + + + classmethod + + +

    +
    delete(model_name: str) -> DeleteLLMEndpointResponse
    +
    + +
    + +

    Deletes an LLM model endpoint.

    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    model_name + `str` + +
    +

    Name of the model

    +
    +
    + required +
    + + + +

    Returns:

    + + + + + + + + + + + + + +
    Name TypeDescription
    response + DeleteLLMEndpointResponse + +
    +

    whether the model was successfully deleted

    +
    +
    + +
    + Example +
    from llmengine import Model
    +
    +response = Model.delete("llama-7b.suffix.2023-07-18-12-00-00")
     print(response.json())
     
    JSON Response
    {
    -    "success": "true"
    +    "deleted": true
     }
     
    diff --git a/guides/fine_tuning/index.html b/guides/fine_tuning/index.html index 6f2a068bf..cf08c4b7b 100644 --- a/guides/fine_tuning/index.html +++ b/guides/fine_tuning/index.html @@ -724,7 +724,7 @@

    Launching the fine-tune

    See the Model Zoo to see which models have fine-tuning support.

    -

    Once the fine-tune is launched, you can also get the status of your fine-tune.

    +

    Once the fine-tune is launched, you can also get the status of your fine-tune.
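    For example, a short status check with FineTune.get (the job ID is illustrative):

    from llmengine import FineTune

    # Poll the status of a launched fine-tuning job by its ID.
    response = FineTune.get(fine_tune_id="ft_abc123")
    print(response.status)  # e.g. "RUNNING"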

    Making inference calls to your fine-tune

    Once your fine-tune is finished, you will be able to start making inference requests to the model. You can use the fine_tune_id returned from your FineTune.create API call to reference your fine-tuned model in the Completions API. Alternatively, you can list available LLMs with Model.list in order to find the name of your fine-tuned model. See the Completion API for more details. You can then use that name to direct your completion requests.
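    A sketch of that flow, assuming your fine-tuned model appears under its generated name in the Model.list output (the name below is illustrative):

    from llmengine import Completion, Model

    # Find the fine-tuned model's name among the available endpoints.
    print([endpoint.name for endpoint in Model.list().model_endpoints])

    # Direct completion requests to the fine-tuned model by name.
    response = Completion.create(
        model="llama-7b.suffix.2023-07-18-12-00-00",
        prompt="Hello, my name is",
        max_new_tokens=10,
    )
    print(response.json())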

    diff --git a/index.html b/index.html index 531adf738..a2f89151f 100644 --- a/index.html +++ b/index.html @@ -597,28 +597,27 @@

    💻 Quick Install
    🤔 About

    -

    Foundation models are emerging as the building blocks of AI. However, deploying -these models to the cloud and fine-tuning them still requires infrastructure and -ML expertise, and can be expensive.

    +

    Foundation models are emerging as the building blocks of AI. However, +fine-tuning these models and deploying them to the cloud are expensive +operations that require infrastructure and ML expertise.

    LLM Engine is a Python library, CLI, and Helm chart that provides everything you need to fine-tune and serve foundation models in the cloud using Kubernetes. Key features include:

    -

    🚀 Ready-to-use Fine-Tuning and Inference APIs for your favorite models: -LLM Engine comes with ready-to-use APIs for your favorite -open-source models, including MPT, Falcon, and LLaMA. Use Scale-hosted endpoints -or deploy to your own infrastructure.

    +

    🎁 Ready-to-use APIs for your favorite models: +Fine-tune and serve open-source foundation models, including MPT, Falcon, +and LLaMA. Use Scale-hosted endpoints or deploy to your own infrastructure.

    🐳 Deploying from any docker image: Turn any Docker image into an auto-scaling deployment with simple APIs.

    🎙️Optimized Inference: LLM Engine provides inference APIs for streaming responses and dynamically batching inputs for higher throughput and lower latency.

    -

    🤗 Open-Source Integrations: Deploy any Huggingface +

    🤗 Open-Source Integrations: Deploy any Hugging Face model with a single command.

    🔥 Features Coming Soon

    Fast Cold-Start Times: To prevent GPUs from idling, LLM Engine automatically scales your model to zero when it's not in use and scales up within seconds, even for large foundation models.

    -

    💸 Cost-Optimized: Deploy AI models cheaper than commercial ones, +

    💸 Cost Optimization: Deploy AI models cheaper than commercial ones, including cold-start and warm-down times.

    diff --git a/model_zoo/index.html b/model_zoo/index.html index 751ac7772..f7244a312 100644 --- a/model_zoo/index.html +++ b/model_zoo/index.html @@ -557,6 +557,8 @@

    🦙 Public Model Zoo
    Each of these models can be used with the Completion API.

    The specified models can be fine-tuned with the FineTune API.

    +

    More information about the model endpoints can be found using the +Model API.

    diff --git a/objects.inv b/objects.inv index 041550f53..f92e1092c 100644 Binary files a/objects.inv and b/objects.inv differ diff --git a/search/search_index.json b/search/search_index.json index 2ecb52a44..f0b176a15 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"\u26a1 LLM Engine \u26a1","text":"

    The open source engine for fine-tuning large language models. LLM Engine is the easiest way to customize and serve LLMs. Use Scale's hosted version or run it in your own cloud.

    "},{"location":"#quick-install","title":"\ud83d\udcbb Quick Install","text":"Install using pip
    pip install scale-llm-engine\n
    "},{"location":"#about","title":"\ud83e\udd14 About","text":"

    Foundation models are emerging as the building blocks of AI. However, deploying these models to the cloud and fine-tuning them still requires infrastructure and ML expertise, and can be expensive.

    LLM Engine is a Python library, CLI, and Helm chart that provides everything you need to fine-tune and serve foundation models in the cloud using Kubernetes. Key features include:

    \ud83d\ude80 Ready-to-use Fine-Tuning and Inference APIs for your favorite models: LLM Engine comes with ready-to-use APIs for your favorite open-source models, including MPT, Falcon, and LLaMA. Use Scale-hosted endpoints or deploy to your own infrastructure.

    \ud83d\udc33 Deploying from any docker image: Turn any Docker image into an auto-scaling deployment with simple APIs.

    \ud83c\udf99\ufe0fOptimized Inference: LLM Engine provides inference APIs for streaming responses and dynamically batching inputs for higher throughput and lower latency.

    \ud83e\udd17 Open-Source Integrations: Deploy any Huggingface model with a single command.

    "},{"location":"#features-coming-soon","title":"\ud83d\udd25 Features Coming Soon","text":"

    \u2744 Fast Cold-Start Times: To prevent GPUs from idling, LLM Engine automatically scales your model to zero when it's not in use and scales up within seconds, even for large foundation models.

    \ud83d\udcb8 Cost-Optimized: Deploy AI models cheaper than commercial ones, including cold-start and warm-down times.

    "},{"location":"faq/","title":"Frequently Asked Questions","text":""},{"location":"getting_started/","title":"\ud83d\ude80 Getting Started","text":"

    To start using LLM Engine's public inference and fine-tuning APIs:

    Install using pipInstall using conda
    pip install scale-llm-engine\n
    conda install scale-llm-engine -c conda-forge\n
    "},{"location":"getting_started/#scale-api-keys","title":"Scale API Keys","text":"

    To leverage Scale's hosted versions of these models, you will need a Scale Spellbook API key.

    "},{"location":"getting_started/#retrieving-your-api-key","title":"Retrieving your API Key","text":"

    To retrieve your API key, head to Scale Spellbook where you will get a Scale API key on the settings page.

    Different API Keys for different Scale Products

    If you have leveraged Scale's platform for annotation work in the past, please note that your Spellbook API key will be different than the Scale Annotation API key. You will want to create a Spellbook API key before getting started.

    "},{"location":"getting_started/#using-your-api-key","title":"Using your API Key","text":"

    LLM Engine leverages environment variables to access your API key. Set this API key as the SCALE_API_KEY environment variable by adding the following line to your .zshrc or .bash_profile, or by running it in the terminal before you run your python application.

    Set API key
    export SCALE_API_KEY=\"[Your API key]\"\n
    "},{"location":"getting_started/#example-code","title":"Example Code","text":""},{"location":"getting_started/#sample-completion","title":"Sample Completion","text":"

    With your API key set, you can now send LLM Engine requests using the Python client:

    Using the Python Client
    from llmengine import Completion\nresponse = Completion.create(\nmodel=\"falcon-7b-instruct\",\nprompt=\"I'm opening a pancake restaurant that specializes in unique pancake shapes, colors, and flavors. List 3 quirky names I could name my restaurant.\",\nmax_new_tokens=100,\ntemperature=0.2,\n)\nprint(response.outputs[0].text)\n
    "},{"location":"getting_started/#with-streaming","title":"With Streaming","text":"Using the Python Client
    from llmengine import Completion\nimport sys\nstream = Completion.create(\nmodel=\"falcon-7b-instruct\",\nprompt=\"Give me a 200 word summary on the current economic events in the US.\",\nmax_new_tokens=1000,\ntemperature=0.2,\nstream=True\n)\nfor response in stream:\nif response.output:\nprint(response.output.text, end=\"\")\nsys.stdout.flush()\n
    "},{"location":"model_zoo/","title":"\ud83e\udd99 Public Model Zoo","text":"

    Scale hosts the following models in a model zoo:

    Model Name Inference APIs Available Fine-tuning APIs Available llama-7b \u2705 \u2705 falcon-7b \u2705 falcon-7b-instruct \u2705 falcon-40b \u2705 falcon-40b-instruct \u2705 mpt-7b \u2705 mpt-7b-instruct \u2705 \u2705 flan-t5-xxl \u2705

    Each of these models can be used with the Completion API.

    The specified models can be fine-tuned with the FineTune API.

    "},{"location":"api/data_types/","title":"\ud83d\udc0d Python Client Data Type Reference","text":""},{"location":"api/data_types/#llmengine.CompletionOutput","title":"CompletionOutput","text":"

    Bases: BaseModel

    Represents the output of a completion request to a model.

    "},{"location":"api/data_types/#llmengine.data_types.CompletionOutput.text","title":"text instance-attribute","text":"
    text: str\n

    The text of the completion.

    "},{"location":"api/data_types/#llmengine.data_types.CompletionOutput.num_completion_tokens","title":"num_completion_tokens instance-attribute","text":"
    num_completion_tokens: int\n

    Number of tokens in the completion.

    "},{"location":"api/data_types/#llmengine.CompletionStreamOutput","title":"CompletionStreamOutput","text":"

    Bases: BaseModel

    "},{"location":"api/data_types/#llmengine.data_types.CompletionStreamOutput.text","title":"text instance-attribute","text":"
    text: str\n

    The text of the completion.

    "},{"location":"api/data_types/#llmengine.data_types.CompletionStreamOutput.finished","title":"finished instance-attribute","text":"
    finished: bool\n

    Whether the completion is finished.

    "},{"location":"api/data_types/#llmengine.data_types.CompletionStreamOutput.num_completion_tokens","title":"num_completion_tokens class-attribute instance-attribute","text":"
    num_completion_tokens: Optional[int] = None\n

    Number of tokens in the completion.

    "},{"location":"api/data_types/#llmengine.CompletionSyncV1Response","title":"CompletionSyncV1Response","text":"

    Bases: BaseModel

    Response object for a synchronous prompt completion.

    "},{"location":"api/data_types/#llmengine.data_types.CompletionSyncV1Response.output","title":"output instance-attribute","text":"
    output: CompletionOutput\n

    Completion output.

    "},{"location":"api/data_types/#llmengine.data_types.CompletionSyncV1Response.request_id","title":"request_id instance-attribute","text":"
    request_id: str\n

    Unique ID of request.

    "},{"location":"api/data_types/#llmengine.CompletionStreamV1Response","title":"CompletionStreamV1Response","text":"

    Bases: BaseModel

    Response object for a stream prompt completion task.

    "},{"location":"api/data_types/#llmengine.data_types.CompletionStreamV1Response.output","title":"output class-attribute instance-attribute","text":"
    output: Optional[CompletionStreamOutput] = None\n

    Completion output.

    "},{"location":"api/data_types/#llmengine.data_types.CompletionStreamV1Response.request_id","title":"request_id instance-attribute","text":"
    request_id: str\n

    Unique ID of request.

    "},{"location":"api/data_types/#llmengine.CreateFineTuneResponse","title":"CreateFineTuneResponse","text":"

    Bases: BaseModel

    Response object for creating a FineTune.

    "},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneResponse.fine_tune_id","title":"fine_tune_id class-attribute instance-attribute","text":"
    fine_tune_id: str = Field(Ellipsis, description='ID of the created fine-tuning job.')\n

    The ID of the FineTune.

    "},{"location":"api/data_types/#llmengine.GetFineTuneResponse","title":"GetFineTuneResponse","text":"

    Bases: BaseModel

    Response object for retrieving a FineTune.

    "},{"location":"api/data_types/#llmengine.data_types.GetFineTuneResponse.fine_tune_id","title":"fine_tune_id class-attribute instance-attribute","text":"
    fine_tune_id: str = Field(Ellipsis, description='ID of the requested job.')\n

    The ID of the FineTune.

    "},{"location":"api/data_types/#llmengine.data_types.GetFineTuneResponse.status","title":"status class-attribute instance-attribute","text":"
    status: BatchJobStatus = Field(Ellipsis, description='Status of the requested job.')\n

    The status of the FineTune job.

    "},{"location":"api/data_types/#llmengine.ListFineTunesResponse","title":"ListFineTunesResponse","text":"

    Bases: BaseModel

    Response object for listing FineTunes.

    "},{"location":"api/data_types/#llmengine.data_types.ListFineTunesResponse.jobs","title":"jobs class-attribute instance-attribute","text":"
    jobs: List[GetFineTuneResponse] = Field(Ellipsis, description='List of fine-tuning jobs and their statuses.')\n

    A list of FineTunes, represented as GetFineTuneResponses.

    "},{"location":"api/data_types/#llmengine.CancelFineTuneResponse","title":"CancelFineTuneResponse","text":"

    Bases: BaseModel

    Response object for cancelling a FineTune.

    "},{"location":"api/data_types/#llmengine.data_types.CancelFineTuneResponse.success","title":"success class-attribute instance-attribute","text":"
    success: bool = Field(Ellipsis, description='Whether cancellation was successful.')\n

    Whether the cancellation succeeded.

    "},{"location":"api/error_handling/","title":"Error handling","text":"

    LLM Engine uses conventional HTTP response codes to indicate the success or failure of an API request. In general: codes in the 2xx range indicate success. Codes in the 4xx range indicate a request that failed given the information provided (e.g. a given Model was not found, or an invalid temperature was specified). Codes in the 5xx range indicate an error with the LLM Engine servers.

    In the Python client, errors are presented via a set of corresponding Exception classes, which should be caught and handled by the user accordingly.

    "},{"location":"api/error_handling/#llmengine.errors.BadRequestError","title":"BadRequestError","text":"
    BadRequestError(message: str)\n

    Bases: Exception

    Corresponds to HTTP 400. Indicates that the request had inputs that were invalid. The user should not attempt to retry the request without changing the inputs.

    "},{"location":"api/error_handling/#llmengine.errors.UnauthorizedError","title":"UnauthorizedError","text":"
    UnauthorizedError(message: str)\n

    Bases: Exception

    Corresponds to HTTP 401. This means that no valid API key was provided.

    "},{"location":"api/error_handling/#llmengine.errors.NotFoundError","title":"NotFoundError","text":"
    NotFoundError(message: str)\n

    Bases: Exception

    Corresponds to HTTP 404. This means that the resource (e.g. a Model, FineTune, etc.) could not be found. Note that this can also be returned in some cases where the object might exist, but the user does not have access to the object. This is done to avoid leaking information about the existence or nonexistence of said object that the user does not have access to.

    "},{"location":"api/error_handling/#llmengine.errors.RateLimitExceededError","title":"RateLimitExceededError","text":"
    RateLimitExceededError(message: str)\n

    Bases: Exception

    Corresponds to HTTP 429. Too many requests hit the API too quickly. We recommend an exponential backoff for retries.

    "},{"location":"api/error_handling/#llmengine.errors.ServerError","title":"ServerError","text":"
    ServerError(status_code: int, message: str)\n

    Bases: Exception

    Corresponds to HTTP 5xx errors on the server.

    "},{"location":"api/langchain/","title":"\ud83e\udd9c Langchain","text":"

    Coming soon!

    "},{"location":"api/python_client/","title":"\ud83d\udc0d Python Client API Reference","text":""},{"location":"api/python_client/#llmengine.Completion","title":"Completion","text":"

    Bases: APIEngine

    Completion API. This API is used to generate text completions.

    Language Models are trained to understand natural language and provide text outputs as a response to their inputs. The inputs are called prompts and outputs are referred to as completions. LLMs take the input prompts and chunk them into smaller units called tokens to process and generate language. Tokens may include trailing spaces and even sub-words; this process is language dependent.

    The Completions API can be run either synchronous or asynchronously (via Python asyncio); for each of these modes, you can also choose to stream token responses or not.

    "},{"location":"api/python_client/#llmengine.completion.Completion.create","title":"create classmethod","text":"
    create(model: str, prompt: str, max_new_tokens: int = 20, temperature: float = 0.2, timeout: int = 10, stream: bool = False) -> Union[CompletionSyncV1Response, Iterator[CompletionStreamV1Response]]\n

    Creates a completion for the provided prompt and parameters synchronously.

    Parameters:

    Name Type Description Default model str

    Name of the model to use. See Model Zoo for a list of Models that are supported.

    required prompt str

    The prompt to generate completions for, encoded as a string.

    required max_new_tokens int

    The maximum number of tokens to generate in the completion.

    The token count of your prompt plus max_new_tokens cannot exceed the model's context length. See Model Zoo for information on each supported model's context length.

    20 temperature float

    What sampling temperature to use, in the range (0, 1]. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

    0.2 timeout int

    Timeout in seconds. This is the maximum amount of time you are willing to wait for a response.

    10 stream bool

    Whether to stream the response. If true, the return type is an Iterator[CompletionStreamV1Response]. Otherwise, the return type is a CompletionSyncV1Response. When streaming, tokens will be sent as data-only server-sent events.

    False

    Returns:

    Name Type Description response Union[CompletionSyncV1Response, AsyncIterable[CompletionStreamV1Response]]

    The generated response (if streaming=False) or iterator of response chunks (if streaming=True)

    Example request without token streaming
    from llmengine import Completion\nresponse = Completion.create(\nmodel=\"llama-7b\",\nprompt=\"Hello, my name is\",\nmax_new_tokens=10,\ntemperature=0.2,\n)\nprint(response.json())\n
    JSON Response
    {\n\"request_id\": \"0123456789\",\n\"outputs\":\n[\n{\n\"text\": \"_______ and I am a _______\",\n\"num_completion_tokens\": 10\n}\n],\n\"traceback\": null\n}\n
    Example request with token streaming
    from llmengine import Completion\nstream = Completion.create(\nmodel=\"llama-7b\",\nprompt=\"why is the sky blue?\",\nmax_new_tokens=5,\ntemperature=0.2,\nstream=True,\n)\nfor response in stream:\nif response.output:\nprint(response.json())\n
    JSON responses
    {\"request_id\": \"0123456789\", \"output\": {\"text\": \"\\n\", \"finished\": false, \"num_completion_tokens\": 1 } }\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \"I\", \"finished\": false, \"num_completion_tokens\": 2 } }\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \" don\", \"finished\": false, \"num_completion_tokens\": 3 } }\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \"\u2019\", \"finished\": false, \"num_completion_tokens\": 4 } }\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \"t\", \"finished\": true, \"num_completion_tokens\": 5 } }\n
    "},{"location":"api/python_client/#llmengine.completion.Completion.acreate","title":"acreate async classmethod","text":"
    acreate(model: str, prompt: str, max_new_tokens: int = 20, temperature: float = 0.2, timeout: int = 10, stream: bool = False) -> Union[CompletionSyncV1Response, AsyncIterable[CompletionStreamV1Response]]\n

    Creates a completion for the provided prompt and parameters asynchronously (with asyncio).

    Parameters:

    Name Type Description Default model str

    Name of the model to use. See Model Zoo for a list of Models that are supported.

    required prompt str

    The prompt to generate completions for, encoded as a string.

    required max_new_tokens int

    The maximum number of tokens to generate in the completion.

    The token count of your prompt plus max_new_tokens cannot exceed the model's context length. See Model Zoo for information on each supported model's context length.

    20 temperature float

    What sampling temperature to use, in the range (0, 1]. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

    0.2 timeout int

    Timeout in seconds. This is the maximum amount of time you are willing to wait for a response.

    10 stream bool

    Whether to stream the response. If true, the return type is an Iterator[CompletionStreamV1Response]. Otherwise, the return type is a CompletionSyncV1Response. When streaming, tokens will be sent as data-only server-sent events.

    False

    Returns:

    Name Type Description response Union[CompletionSyncV1Response, AsyncIterable[CompletionStreamV1Response]]

    The generated response (if streaming=False) or iterator of response chunks (if streaming=True)

    Example without token streaming
    import asyncio\nfrom llmengine import Completion\nasync def main():\nresponse = await Completion.acreate(\nmodel=\"llama-7b\",\nprompt=\"Hello, my name is\",\nmax_new_tokens=10,\ntemperature=0.2,\n)\nprint(response.json())\nasyncio.run(main())\n
    JSON response
    {\n\"request_id\": \"b1b2c3d4e5f6g7h8i9j0\",\n\"outputs\":\n[\n{\n\"text\": \"_______, and I am a _____\",\n\"num_completion_tokens\": 10\n}\n],\n}\n
    Example with token streaming
    import asyncio\nfrom llmengine import Completion\nasync def main():\nstream = await Completion.acreate(\nmodel=\"llama-7b\",\nprompt=\"why is the sky blue?\",\nmax_new_tokens=5,\ntemperature=0.2,\nstream=True,\n)\nasync for response in stream:\nif response.output:\nprint(response.json())\nasyncio.run(main())\n
    JSON responses
    {\"request_id\": \"0123456789\", \"output\": {\"text\": \"\\n\", \"finished\": false, \"num_completion_tokens\": 1}}\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \"I\", \"finished\": false, \"num_completion_tokens\": 2}}\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \" think\", \"finished\": false, \"num_completion_tokens\": 3}}\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \" the\", \"finished\": false, \"num_completion_tokens\": 4}}\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \" sky\", \"finished\": true, \"num_completion_tokens\": 5}}\n
    "},{"location":"api/python_client/#llmengine.FineTune","title":"FineTune","text":"

    Bases: APIEngine

    FineTune API. This API is used to fine-tune models.

    Fine-tuning is a process where the LLM is further trained on a task-specific dataset, allowing the model to adjust its parameters to better align with the task at hand. Fine-tuning involves the supervised training phase, where prompt/response pairs are provided to optimize the performance of the LLM.

    Scale llm-engine provides APIs to create fine-tunes on a base-model with training & validation data-sets. APIs are also provided to list, cancel and retrieve fine-tuning jobs.

    "},{"location":"api/python_client/#llmengine.fine_tuning.FineTune.create","title":"create classmethod","text":"
    create(model: str, training_file: str, validation_file: Optional[str] = None, hyperparameters: Optional[Dict[str, str]] = None, suffix: Optional[str] = None) -> CreateFineTuneResponse\n

    Creates a job that fine-tunes a specified model from a given dataset.

    Parameters:

    Name Type Description Default model `str`

    The name of the base model to fine-tune. See Model Zoo for the list of available models to fine-tune.

    required training_file `str`

    Path to file of training dataset

    required validation_file `Optional[str]`

    Path to file of validation dataset

    None hyperparameters `str`

    Hyperparameters

    None suffix `Optional[str]`

    A string that will be added to your fine-tuned model name.

    None

    Returns:

    Name Type Description CreateFineTuneResponse CreateFineTuneResponse

    an object that contains the ID of the created fine-tuning job

    The model is the name of the base model (see Model Zoo for available models) to fine-tune. The training file should consist of prompt and response pairs. Your data must be formatted as a CSV file that includes two columns: prompt and response. A maximum of 100,000 rows of data is currently supported. At least 200 rows of data are recommended to start to see benefits from fine-tuning.

    Here is an example script to create a 5-row CSV of properly formatted data for fine-tuning an airline question answering bot:

    import csv\n# Define data\ndata = [\n(\"What is your policy on carry-on luggage?\", \"Our policy allows each passenger to bring one piece of carry-on luggage and one personal item such as a purse or briefcase. The maximum size for carry-on luggage is 22 x 14 x 9 inches.\"),\n(\"How can I change my flight?\", \"You can change your flight through our website or mobile app. Go to 'Manage my booking' section, enter your booking reference and last name, then follow the prompts to change your flight.\"),\n(\"What meals are available on my flight?\", \"We offer a variety of meals depending on the flight's duration and route. These can range from snacks and light refreshments to full-course meals on long-haul flights. Specific meal options can be viewed during the booking process.\"),\n(\"How early should I arrive at the airport before my flight?\", \"We recommend arriving at least two hours before domestic flights and three hours before international flights.\"),\n(\"Can I select my seat in advance?\", \"Yes, you can select your seat during the booking process or afterwards via the 'Manage my booking' section on our website or mobile app.\"),\n]\n# Write data to a CSV file\nwith open('customer_service_data.csv', 'w', newline='') as file:\nwriter = csv.writer(file)\nwriter.writerow([\"prompt\", \"response\"])\nwriter.writerows(data)\n
    Example code for fine-tuning
    from llmengine import FineTune\nresponse = FineTune.create(\nmodel=\"llama-7b\",\ntraining_file=\"s3://my-bucket/path/to/training-file.csv\",\n)\nprint(response.json())\n
    JSON Response
    {\n\"fine_tune_id\": \"ft_abc123\"\n}\n
    "},{"location":"api/python_client/#llmengine.fine_tuning.FineTune.list","title":"list classmethod","text":"
    list() -> ListFineTunesResponse\n

    List fine-tuning jobs

    Returns:

    Name Type Description ListFineTunesResponse ListFineTunesResponse

    an object that contains a list of all fine-tuning jobs and their statuses

    Example
    from llmengine import FineTune\nresponse = FineTune.list()\nprint(response.json())\n
    JSON Response
    [\n{\n\"fine_tune_id\": \"ft_abc123\",\n\"status\": \"RUNNING\"\n},\n{\n\"fine_tune_id\": \"ft_def456\",\n\"status\": \"SUCCESS\"\n}\n]\n
    "},{"location":"api/python_client/#llmengine.fine_tuning.FineTune.retrieve","title":"retrieve classmethod","text":"
    retrieve(fine_tune_id: str) -> GetFineTuneResponse\n

    Get status of a fine-tuning job

    Parameters:

    Name Type Description Default fine_tune_id `str`

    ID of the fine-tuning job

    required

    Returns:

    Name Type Description GetFineTuneResponse GetFineTuneResponse

    an object that contains the ID and status of the requested job

    Example
    from llmengine import FineTune\nresponse = FineTune.retrieve(\nfine_tune_id=\"ft_abc123\",\n)\nprint(response.json())\n
    JSON Response
    {\n\"fine_tune_id\": \"ft_abc123\",\n\"status\": \"RUNNING\"\n}\n
    "},{"location":"api/python_client/#llmengine.fine_tuning.FineTune.cancel","title":"cancel classmethod","text":"
    cancel(fine_tune_id: str) -> CancelFineTuneResponse\n

    Cancel a fine-tuning job

    Parameters:

    Name Type Description Default fine_tune_id `str`

    ID of the fine-tuning job

    required

    Returns:

    Name Type Description CancelFineTuneResponse CancelFineTuneResponse

    an object that contains whether the cancellation was successful

    Example
    from llmengine import FineTune\nresponse = FineTune.cancel(fine_tune_id=\"ft_abc123\")\nprint(response.json())\n
    JSON Response
    {\n\"success\": \"true\"\n}\n
    "},{"location":"guides/completions/","title":"Completions","text":"

    Language Models are trained to understand natural language and provide text outputs as a response to their inputs. The inputs are called prompts and outputs are referred to as completions. LLMs take the input prompts and chunk them into smaller units called tokens to process and generate language. Tokens may include trailing spaces and even sub-words; this process is language dependent.

    Scale LLM Engine provides access to open source language models (see Model Zoo) that can be used for producing completions to prompts.

    "},{"location":"guides/completions/#completion-api-call","title":"Completion API call","text":"

    An example API call looks as follows:

    from llmengine import Completion\nresponse = Completion.create(\nmodel=\"llama-7b\",\nprompt=\"Hello, my name is\",\nmax_new_tokens=10,\ntemperature=0.2,\n)\n

    The model is the LLM to be used (see Model Zoo). The prompt is the main input for the LLM to respond to. The max_new_tokens parameter is the maximum number of tokens to generate in the completion. The temperature is the sampling temperature to use. Higher values make the output more random, while lower values will make it more focused and deterministic.

    See the full API reference documentation to learn more.

    "},{"location":"guides/completions/#completion-api-response","title":"Completion API response","text":"

    An example Completion API response looks as follows:

    {\n\"outputs\": [\n{\n\"text\": \"_______ and I am a _______\",\n\"num_completion_tokens\": 10\n}\n]\n}\n

    In Python, the response is of type CompletionSyncV1Response, which maps to the above JSON structure.

    print( response.outputs[0].text )\n
    "},{"location":"guides/completions/#token-streaming","title":"Token streaming","text":"

    The Completions API supports token streaming to reduce perceived latency for certain applications. When streaming, tokens will be sent as data-only server-sent events.

    To enable token streaming, pass stream=True to either Completion.create or Completion.acreate.

    An example of token streaming using the synchronous Completions API looks as follows:

    from llmengine import Completion\nstream = Completion.create(\nmodel=\"llama-7b\",\nprompt=\"why is the sky blue?\",\nmax_new_tokens=5,\ntemperature=0.2,\nstream=True,\n)\nfor response in stream:\nif response.output:\nprint(response.json())\n
    "},{"location":"guides/completions/#async-requests","title":"Async requests","text":"

    The Python client supports asyncio for creating Completions. Use Completion.acreate instead of Completion.create to utilize async processing. The function signatures are otherwise identical.

    An example of async Completions looks as follows:

    import asyncio\nfrom llmengine import Completion\nasync def main():\nresponse = await Completion.acreate(\nmodel=\"llama-7b\",\nprompt=\"Hello, my name is\",\nmax_new_tokens=10,\ntemperature=0.2,\n)\nprint(response.json())\nasyncio.run(main())\n
    "},{"location":"guides/completions/#which-model-should-i-use","title":"Which model should I use?","text":"

    See the Model Zoo for more information on best practices for which model to use for Completions.

    "},{"location":"guides/fine_tuning/","title":"Fine-tuning","text":"

    Learn how to customize your models on your data with fine-tuning.

    "},{"location":"guides/fine_tuning/#introduction","title":"Introduction","text":"

    Fine-tuning helps improve model performance by training on specific examples of prompts and desired responses. LLMs are initially trained on data collected from the entire internet. With fine-tuning, LLMs can be optimized to perform better in a specific domain by learning from examples for that domain. Smaller LLMs that have been fine-tuned on a specific use case often outperform larger ones that were trained more generally.

    Fine-tuning allows for:

    1. Higher quality results than prompt engineering alone
    2. Cost savings through shorter prompts
    3. The ability to reach equivalent accuracy with a smaller model
    4. Lower latency at inference time
    5. The chance to show an LLM more examples than can fit in a single context window

    LLM Engine's fine-tuning API lets you fine-tune various open source LLMs on your own data and then make inference calls to the resulting LLM. For more specific details, see the fine-tuning API Python client reference.

    "},{"location":"guides/fine_tuning/#producing-high-quality-data-for-fine-tuning","title":"Producing high quality data for fine-tuning","text":"

    The training data for fine-tuning should consist of prompt and response pairs.

    As a rule of thumb, you should expect to see linear improvements in your fine-tuned model's quality with each doubling of the dataset size. Having high-quality data is also essential to improving performance. For every linear increase in the error rate in your training data, you may encounter a roughly quadratic increase in your fine-tuned model's error rate.

    High-quality data is critical to achieving improved model performance, and in several cases will require experts to generate and prepare data; the breadth and diversity of the data are essential. Scale's Data Engine can help prepare such high-quality, diverse datasets - more information here.

    "},{"location":"guides/fine_tuning/#preparing-data","title":"Preparing data","text":"

    Your data must be formatted as a CSV file that includes two columns: prompt and response. A maximum of 100,000 rows of data is currently supported. At least 200 rows of data is recommended to start to see benefits from fine-tuning.

    Here is an example script to create a 50-row CSV of properly formatted data for fine-tuning an airline question answering bot:

    Creating a sample dataset
    import csv\n# Define data\ndata = [\n(\"What is your policy on carry-on luggage?\", \"Our policy allows each passenger to bring one piece of carry-on luggage and one personal item such as a purse or briefcase. The maximum size for carry-on luggage is 22 x 14 x 9 inches.\"),\n(\"How can I change my flight?\", \"You can change your flight through our website or mobile app. Go to 'Manage my booking' section, enter your booking reference and last name, then follow the prompts to change your flight.\"),\n(\"What meals are available on my flight?\", \"We offer a variety of meals depending on the flight's duration and route. These can range from snacks and light refreshments to full-course meals on long-haul flights. Specific meal options can be viewed during the booking process.\"),\n(\"How early should I arrive at the airport before my flight?\", \"We recommend arriving at least two hours before domestic flights and three hours before international flights.\"),\n(\"Can I select my seat in advance?\", \"Yes, you can select your seat during the booking process or afterwards via the 'Manage my booking' section on our website or mobile app.\"),\n(\"What should I do if my luggage is lost?\", \"If your luggage is lost, please report this immediately at our 'Lost and Found' counter at the airport. We will assist you in tracking your luggage.\"),\n(\"Do you offer special assistance for passengers with disabilities?\", \"Yes, we offer special assistance for passengers with disabilities. Please notify us of your needs at least 48 hours prior to your flight.\"),\n(\"Can I bring my pet on the flight?\", \"Yes, we allow small pets in the cabin, and larger pets in the cargo hold. Please check our pet policy for more details.\"),\n(\"What is your policy on flight cancellations?\", \"In case of flight cancellations, we aim to notify passengers as early as possible and offer either a refund or a rebooking on the next available flight.\"),\n(\"Can I get a refund if I cancel my flight?\", \"Refunds depend on the type of ticket purchased. Please check our cancellation policy for details. Non-refundable tickets, however, are typically not eligible for refunds unless due to extraordinary circumstances.\"),\n(\"How can I check-in for my flight?\", \"You can check-in for your flight either online, through our mobile app, or at the airport. Online and mobile app check-in opens 24 hours before departure and closes 90 minutes before.\"),\n(\"Do you offer free meals on your flights?\", \"Yes, we serve free meals on all long-haul flights. For short-haul flights, we offer a complimentary drink and snack. Special meal requests should be made at least 48 hours before departure.\"),\n(\"Can I use my electronic devices during the flight?\", \"Small electronic devices can be used throughout the flight in flight mode. Larger devices like laptops may be used above 10,000 feet.\"),\n(\"How much baggage can I check-in?\", \"The checked baggage allowance depends on the class of travel and route. The details would be mentioned on your ticket, or you can check on our website.\"),\n(\"How can I request for a wheelchair?\", \"To request a wheelchair or any other special assistance, please call our customer service at least 48 hours before your flight.\"),\n(\"Do I get a discount for group bookings?\", \"Yes, we offer discounts on group bookings of 10 or more passengers. Please contact our group bookings team for more information.\"),\n(\"Do you offer Wi-fi on your flights?\", \"Yes, we offer complimentary Wi-fi on select flights. 
You can check the availability during the booking process.\"),\n(\"What is the minimum connecting time between flights?\", \"The minimum connecting time varies depending on the airport and whether your flight is international or domestic. Generally, it's recommended to allow at least 45-60 minutes for domestic connections and 60-120 minutes for international.\"),\n(\"Do you offer duty-free shopping on international flights?\", \"Yes, we have a selection of duty-free items that you can pre-order on our website or purchase onboard on international flights.\"),\n(\"Can I upgrade my ticket to business class?\", \"Yes, you can upgrade your ticket through the 'Manage my booking' section on our website or by contacting our customer service. The availability and costs depend on the specific flight.\"),\n(\"Can unaccompanied minors travel on your flights?\", \"Yes, we do accommodate unaccompanied minors on our flights, with special services to ensure their safety and comfort. Please contact our customer service for more details.\"),\n(\"What amenities do you provide in business class?\", \"In business class, you will enjoy additional legroom, reclining seats, premium meals, priority boarding and disembarkation, access to our business lounge, extra baggage allowance, and personalized service.\"),\n(\"How much does extra baggage cost?\", \"Extra baggage costs vary based on flight route and the weight of the baggage. Please refer to our 'Extra Baggage' section on the website for specific rates.\"),\n(\"Are there any specific rules for carrying liquids in carry-on?\", \"Yes, liquids carried in your hand luggage must be in containers of 100 ml or less and they should all fit into a single, transparent, resealable plastic bag of 20 cm x 20 cm.\"),\n(\"What if I have a medical condition that requires special assistance during the flight?\", \"We aim to make the flight comfortable for all passengers. If you have a medical condition that may require special assistance, please contact our \u2018special services\u2019 team 48 hours before your flight.\"),\n(\"What in-flight entertainment options are available?\", \"We offer a range of in-flight entertainment options including a selection of movies, TV shows, music, and games, available on your personal seat-back screen.\"),\n(\"What types of payment methods do you accept?\", \"We accept credit/debit cards, PayPal, bank transfers, and various other forms of payment. The available options may vary depending on the country of departure.\"),\n(\"How can I earn and redeem frequent flyer miles?\", \"You can earn miles for every journey you take with us or our partner airlines. These miles can be redeemed for flight tickets, upgrades, or various other benefits. To earn and redeem miles, you need to join our frequent flyer program.\"),\n(\"Can I bring a stroller for my baby?\", \"Yes, you can bring a stroller for your baby. It can be checked in for free and will normally be given back to you at the aircraft door upon arrival.\"),\n(\"What age does my child have to be to qualify as an unaccompanied minor?\", \"Children aged between 5 and 12 years who are traveling alone are considered unaccompanied minors. Our team provides special care for these children from departure to arrival.\"),\n(\"What documents do I need to travel internationally?\", \"For international travel, you need a valid passport and may also require visas, depending on your destination and your country of residence. 
It's important to check the specific requirements before you travel.\"),\n(\"What happens if I miss my flight?\", \"If you miss your flight, please contact our customer service immediately. Depending on the circumstances, you may be able to rebook on a later flight, but additional fees may apply.\"),\n(\"Can I travel with my musical instrument?\", \"Yes, small musical instruments can be brought on board as your one carry-on item. Larger instruments must be transported in the cargo, or if small enough, a seat may be purchased for them.\"),\n(\"Do you offer discounts for children or infants?\", \"Yes, children aged 2-11 traveling with an adult usually receive a discount on the fare. Infants under the age of 2 who do not occupy a seat can travel for a reduced fare or sometimes for free.\"),\n(\"Is smoking allowed on your flights?\", \"No, all our flights are non-smoking for the comfort and safety of all passengers.\"),\n(\"Do you have family seating?\", \"Yes, we offer the option to seat families together. You can select seats during booking or afterwards through the 'Manage my booking' section on the website.\"),\n(\"Is there any discount for senior citizens?\", \"Some flights may offer a discount for senior citizens. Please check our website or contact customer service for accurate information.\"),\n(\"What items are prohibited on your flights?\", \"Prohibited items include, but are not limited to, sharp objects, firearms, explosive materials, and certain chemicals. You can find a comprehensive list on our website under the 'Security Regulations' section.\"),\n(\"Can I purchase a ticket for someone else?\", \"Yes, you can purchase a ticket for someone else. You'll need their correct name as it appears on their government-issued ID, and their correct travel dates.\"),\n(\"What is the process for lost and found items on the plane?\", \"If you realize you forgot an item on the plane, report it as soon as possible to our lost and found counter. We will make every effort to locate and return your item.\"),\n(\"Can I request a special meal?\", \"Yes, we offer a variety of special meals to accommodate dietary restrictions. Please request your preferred meal at least 48 hours prior to your flight.\"),\n(\"Is there a weight limit for checked baggage?\", \"Yes, luggage weight limits depend on your ticket class and route. You can find the details on your ticket or by visiting our website.\"),\n(\"Can I bring my sports equipment?\", \"Yes, certain types of sports equipment can be carried either as or in addition to your permitted baggage. Some equipment may require additional fees. It's best to check our policy on our website or contact us directly.\"),\n(\"Do I need a visa to travel to certain countries?\", \"Yes, visa requirements depend on the country you are visiting and your nationality. We advise checking with the relevant embassy or consulate prior to travel.\"),\n(\"How can I add extra baggage to my booking?\", \"You can add extra baggage to your booking through the 'Manage my booking' section on our website or by contacting our customer services.\"),\n(\"Can I check-in at the airport?\", \"Yes, you can choose to check-in at the airport. However, we also offer online and mobile check-in, which may save you time.\"),\n(\"How do I know if my flight is delayed or cancelled?\", \"In case of any changes to your flight, we will attempt to notify all passengers using the contact information given at the time of booking. 
You can also check your flight status on our website.\"),\n(\"What is your policy on pregnant passengers?\", \"Pregnant passengers can travel up to the end of the 36th week for single pregnancies, and the end of the 32nd week for multiple pregnancies. We recommend consulting your doctor before any air travel.\"),\n(\"Can children travel alone?\", \"Yes, children age 5 to 12 can travel alone as unaccompanied minors. We provide special care for these seats. Please contact our customer service for more information.\"),\n(\"How can I pay for my booking?\", \"You can pay for your booking using a variety of methods including credit and debit cards, PayPal, or bank transfers. The options may vary depending on the country of departure.\"),\n]\n# Write data to a CSV file\nwith open('customer_service_data.csv', 'w', newline='') as file:\nwriter = csv.writer(file)\nwriter.writerow([\"prompt\", \"response\"])\nwriter.writerows(data)\n
    "},{"location":"guides/fine_tuning/#making-your-data-accessible-to-llm-engine","title":"Making your data accessible to LLM Engine","text":"

    Currently, data needs to be uploaded to a publicly accessible web URL so that it can be read for fine-tuning. Publicly accessible HTTP, HTTPS, and S3 URLs are currently supported. Support for privately sharing data with the LLM Engine API is coming shortly. For quick iteration, you can use tools like Pastebin or GitHub Gists to host your CSV files publicly. We created an example GitHub Gist you can see here. To use the gist, use the URL given when you click the \u201cRaw\u201d button.
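    If your data lives in S3, one way to produce a readable HTTPS URL is a presigned link. The following is a minimal sketch assuming boto3 is installed and AWS credentials are configured; the bucket and key names are hypothetical placeholders.

    import boto3

    s3 = boto3.client("s3")

    # Upload the CSV created earlier (bucket and key are placeholders).
    s3.upload_file("customer_service_data.csv", "my-bucket", "path/to/training-file.csv")

    # Create a time-limited HTTPS URL that LLM Engine can read from.
    url = s3.generate_presigned_url(
        "get_object",
        Params={"Bucket": "my-bucket", "Key": "path/to/training-file.csv"},
        ExpiresIn=3600,  # valid for one hour
    )
    print(url)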

    "},{"location":"guides/fine_tuning/#launching-the-fine-tune","title":"Launching the fine-tune","text":"

    Once you have uploaded your data, you can use the LLM Engine API to launch a fine-tune. You will need to specify which base model to fine-tune, the locations of the training file and optional validation data file, an optional set of hyperparameters to customize the fine-tuning behavior, and an optional suffix to append to the name of the fine-tune.

    Create a fine-tune
    from llmengine import FineTune\nresponse = FineTune.create(\nmodel=\"llama-7b\",\ntraining_file=\"s3://my-bucket/path/to/training-file.csv\",\n)\nprint(response.json())\n
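    The request above sets only the required fields. You can also pass a validation file, hyperparameters, and a suffix, all documented in the CreateFineTuneRequest data type. The sketch below shows the full shape with hypothetical values; in particular, the hyperparameter key is illustrative, not an authoritative list of supported names.

    Create a fine-tune with optional fields
    from llmengine import FineTune

    response = FineTune.create(
        model="llama-7b",
        training_file="s3://my-bucket/path/to/training-file.csv",
        validation_file="s3://my-bucket/path/to/validation-file.csv",  # optional
        hyperparameters={"epochs": "3"},  # optional; key name is hypothetical
        suffix="airline-bot",  # optional; appended to the fine-tuned model name
    )
    print(response.fine_tune_id)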

    See the Model Zoo for which models support fine-tuning.

    Once the fine-tune is launched, you can also get the status of your fine-tune.
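    For example, using the FineTune.get API documented below:

    Checking the status of a fine-tune
    from llmengine import FineTune

    response = FineTune.get(fine_tune_id="ft_abc123")
    print(response.status)  # e.g. RUNNING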

    "},{"location":"guides/fine_tuning/#making-inference-calls-to-your-fine-tune","title":"Making inference calls to your fine-tune","text":"

    Once your fine-tune is finished, you will be able to start making inference requests to the model. You can use the fine_tune_id returned from your FineTune.create API call to reference your fine-tuned model in the Completions API. Alternatively, you can list available LLMs with Model.list in order to find the name of your fine-tuned model. See the Completion API for more details. You can then use that name to direct your completion requests.
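    For instance, here is a short sketch that lists endpoints and picks out fine-tuned models by the suffix chosen at creation time; the "airline-bot" filter is a hypothetical example.

    from llmengine import Model

    response = Model.list()
    # Fine-tuned model names embed the suffix passed to FineTune.create.
    my_models = [
        endpoint.name
        for endpoint in response.model_endpoints
        if "airline-bot" in endpoint.name  # hypothetical suffix filter
    ]
    print(my_models)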

    Inference with a fine-tuned model
    from llmengine import Completion\nresponse = Completion.create(\nmodel_name=\"ft_abc123\",\nprompt=\"Do you offer in-flight Wi-fi?\",\nmax_new_tokens=100,\ntemperature=0.2,\n)\nprint(response.json())\n
    "},{"location":"guides/rate_limits/","title":"Overview","text":""},{"location":"guides/rate_limits/#what-are-rate-limits","title":"What are rate limits?","text":"

    A rate limit is a restriction that an API imposes on the number of times a user or client can access the server within a specified period of time.

    "},{"location":"guides/rate_limits/#how-do-i-know-if-i-am-rate-limited","title":"How do I know if I am rate limited?","text":"

    Per standard HTTP practices, your request will receive a response with an HTTP status code of 429, Too Many Requests.

    "},{"location":"guides/rate_limits/#what-are-the-rate-limits-for-our-api","title":"What are the rate limits for our API?","text":"

    The LLM Engine API is currently in a preview mode, and therefore we currently do not have any advertised rate limits. As the API moves towards a production release, we will update this section with specific rate limits. For now, the API will return HTTP 429 on an as-needed basis.

    "},{"location":"guides/rate_limits/#error-mitigation","title":"Error mitigation","text":""},{"location":"guides/rate_limits/#retrying-with-exponential-backoff","title":"Retrying with exponential backoff","text":"

    One easy way to avoid rate limit errors is to automatically retry requests with a random exponential backoff. Retrying with exponential backoff means performing a short sleep when a rate limit error is hit, then retrying the unsuccessful request. If the request is still unsuccessful, the sleep length is increased and the process is repeated. This continues until the request is successful or until a maximum number of retries is reached. This approach has many benefits:

    • Automatic retries mean you can recover from rate limit errors without crashes or missing data
    • Exponential backoff means that your first retries happen quickly, while still benefiting from longer delays if your first few retries fail
    • Adding random jitter to the delay helps prevent retries from all hitting at the same time

    Below are a few example solutions for Python that use exponential backoff.

    "},{"location":"guides/rate_limits/#example-1-using-the-tenacity-library","title":"Example #1: Using the tenacity library","text":"

    Tenacity is an Apache 2.0 licensed general-purpose retrying library, written in Python, to simplify the task of adding retry behavior to just about anything. To add exponential backoff to your requests, you can use the tenacity.retry decorator. The example below uses the tenacity.wait_random_exponential function to add random exponential backoff to a request.

    import llmengine\nfrom tenacity import (\nretry,\nstop_after_attempt,\nwait_random_exponential,\n)  # for exponential backoff\n@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))\ndef completion_with_backoff(**kwargs):\nreturn llmengine.Completion.create(**kwargs)\ncompletion_with_backoff(model=\"llama-7b\", prompt=\"Why is the sky blue?\")\n
    "},{"location":"guides/rate_limits/#example-2-using-the-backoff-library","title":"Example #2: Using the backoff library","text":"

    Another Python library that provides function decorators for backoff and retry is backoff:

    import llmengine\nimport backoff\n@backoff.on_exception(backoff.expo, llmengine.error.RateLimitError)\ndef completions_with_backoff(**kwargs):\nreturn llmengine.Completion.create(**kwargs)\ncompletions_with_backoff(model=\"llama-7b\", prompt=\"Why is the sky blue?\")\n
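    If you would rather avoid a dependency, the same pattern can be written by hand. The following is an illustrative sketch, not part of the documented API; it reuses the llmengine.error.RateLimitError exception from the example above.

    import random
    import time

    import llmengine

    def completions_with_manual_backoff(max_retries=6, initial_delay=1.0, **kwargs):
        delay = initial_delay
        for attempt in range(max_retries):
            try:
                return llmengine.Completion.create(**kwargs)
            except llmengine.error.RateLimitError:
                if attempt == max_retries - 1:
                    raise  # out of retries; surface the error
                # Sleep for the current delay plus random jitter, then double it.
                time.sleep(delay + random.random())
                delay *= 2

    completions_with_manual_backoff(model="llama-7b", prompt="Why is the sky blue?")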
    "},{"location":"guides/token_streaming/","title":"Token streaming","text":"

    The Completions APIs support a stream boolean parameter that, when True, will return a streamed response of token-by-token server-sent events (SSEs) rather than waiting for the full response once model generation has finished. This decreases the latency before you start receiving a response.

    The response will consist of SSEs of the form {\"token\": dict, \"generated_text\": str | null, \"details\": dict | null}, where the dictionary for each token will contain log probability information in addition to the generated string; the generated_text field will be null for all but the last SSE, for which it will contain the full generated response.
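    Putting this together, a minimal consumer of the stream looks as follows; it prints tokens as they arrive and uses the finished field (documented in CompletionStreamOutput) to detect the final SSE.

    from llmengine import Completion

    stream = Completion.create(
        model="llama-7b",
        prompt="why is the sky blue?",
        max_new_tokens=5,
        temperature=0.2,
        stream=True,
    )
    for response in stream:
        if response.output:
            print(response.output.text, end="")
            if response.output.finished:
                print()  # the final SSE marks the end of generation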

    "}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"\u26a1 LLM Engine \u26a1","text":"

    The open source engine for fine-tuning large language models. LLM Engine is the easiest way to customize and serve LLMs. Use Scale's hosted version or run it in your own cloud.

    "},{"location":"#quick-install","title":"\ud83d\udcbb Quick Install","text":"Install using pip
    pip install scale-llm-engine\n
    "},{"location":"#about","title":"\ud83e\udd14 About","text":"

    Foundation models are emerging as the building blocks of AI. However, fine-tuning these models and deploying them to the cloud are expensive operations that require infrastructure and ML expertise.

    LLM Engine is a Python library, CLI, and Helm chart that provides everything you need to fine-tune and serve foundation models in the cloud using Kubernetes. Key features include:

    \ud83c\udf81 Ready-to-use APIs for your favorite models: Fine-tune and serve open-source foundation models, including MPT, Falcon, and LLaMA. Use Scale-hosted endpoints or deploy to your own infrastructure.

    \ud83d\udc33 Deploying from any docker image: Turn any Docker image into an auto-scaling deployment with simple APIs.

    \ud83c\udf99\ufe0f Optimized Inference: LLM Engine provides inference APIs for streaming responses and dynamically batching inputs for higher throughput and lower latency.

    \ud83e\udd17 Open-Source Integrations: Deploy any Hugging Face model with a single command.

    "},{"location":"#features-coming-soon","title":"\ud83d\udd25 Features Coming Soon","text":"

    \u2744 Fast Cold-Start Times: To prevent GPUs from idling, LLM Engine automatically scales your model to zero when it's not in use and scales up within seconds, even for large foundation models.

    \ud83d\udcb8 Cost Optimization: Deploy AI models more cheaply than commercial alternatives, including the cost of cold-start and warm-down times.

    "},{"location":"faq/","title":"Frequently Asked Questions","text":""},{"location":"getting_started/","title":"\ud83d\ude80 Getting Started","text":"

    To start using LLM Engine's public inference and fine-tuning APIs:

    Install using pip
    pip install scale-llm-engine\n
    Install using conda
    conda install scale-llm-engine -c conda-forge\n
    "},{"location":"getting_started/#scale-api-keys","title":"Scale API Keys","text":"

    To leverage Scale's hosted versions of these models, you will need a Scale Spellbook API key.

    "},{"location":"getting_started/#retrieving-your-api-key","title":"Retrieving your API Key","text":"

    To retrieve your API key, head to Scale Spellbook, where you will find your Scale API key on the settings page.

    Different API Keys for different Scale Products

    If you have leveraged Scale's platform for annotation work in the past, please note that your Spellbook API key will be different from the Scale Annotation API key. You will want to create a Spellbook API key before getting started.

    "},{"location":"getting_started/#using-your-api-key","title":"Using your API Key","text":"

    LLM Engine leverages environment variables to access your API key. Set this API key as the SCALE_API_KEY environment variable by adding the following line to your .zshrc or .bash_profile, or by running it in the terminal before you run your Python application.

    Set API key
    export SCALE_API_KEY=\"[Your API key]\"\n
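    Alternatively, the key can be set from within Python before the client is used; this is equivalent to the shell export above.

    import os

    # Must be set before llmengine makes its first request.
    os.environ["SCALE_API_KEY"] = "[Your API key]"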
    "},{"location":"getting_started/#example-code","title":"Example Code","text":""},{"location":"getting_started/#sample-completion","title":"Sample Completion","text":"

    With your API key set, you can now send LLM Engine requests using the Python client:

    Using the Python Client
    from llmengine import Completion\nresponse = Completion.create(\nmodel=\"falcon-7b-instruct\",\nprompt=\"I'm opening a pancake restaurant that specializes in unique pancake shapes, colors, and flavors. List 3 quirky names I could name my restaurant.\",\nmax_new_tokens=100,\ntemperature=0.2,\n)\nprint(response.outputs[0].text)\n
    "},{"location":"getting_started/#with-streaming","title":"With Streaming","text":"Using the Python Client
    from llmengine import Completion\nimport sys\nstream = Completion.create(\nmodel=\"falcon-7b-instruct\",\nprompt=\"Give me a 200 word summary on the current economic events in the US.\",\nmax_new_tokens=1000,\ntemperature=0.2,\nstream=True\n)\nfor response in stream:\nif response.output:\nprint(response.output.text, end=\"\")\nsys.stdout.flush()\n
    "},{"location":"model_zoo/","title":"\ud83e\udd99 Public Model Zoo","text":"

    Scale hosts the following models in a model zoo:

    Model Name | Inference APIs Available | Fine-tuning APIs Available
    llama-7b | \u2705 | \u2705
    falcon-7b | \u2705 |
    falcon-7b-instruct | \u2705 |
    falcon-40b | \u2705 |
    falcon-40b-instruct | \u2705 |
    mpt-7b | \u2705 |
    mpt-7b-instruct | \u2705 | \u2705
    flan-t5-xxl | \u2705 |

    Each of these models can be used with the Completion API.

    The specified models can be fine-tuned with the FineTune API.

    More information about the model endpoints can be found using the Model API.

    "},{"location":"api/data_types/","title":"\ud83d\udc0d Python Client Data Type Reference","text":""},{"location":"api/data_types/#llmengine.CompletionOutput","title":"CompletionOutput","text":"

    Bases: BaseModel

    Represents the output of a completion request to a model.

    "},{"location":"api/data_types/#llmengine.data_types.CompletionOutput.text","title":"text instance-attribute","text":"
    text: str\n

    The text of the completion.

    "},{"location":"api/data_types/#llmengine.data_types.CompletionOutput.num_completion_tokens","title":"num_completion_tokens instance-attribute","text":"
    num_completion_tokens: int\n

    Number of tokens in the completion.

    "},{"location":"api/data_types/#llmengine.CompletionStreamOutput","title":"CompletionStreamOutput","text":"

    Bases: BaseModel

    "},{"location":"api/data_types/#llmengine.data_types.CompletionStreamOutput.text","title":"text instance-attribute","text":"
    text: str\n

    The text of the completion.

    "},{"location":"api/data_types/#llmengine.data_types.CompletionStreamOutput.finished","title":"finished instance-attribute","text":"
    finished: bool\n

    Whether the completion is finished.

    "},{"location":"api/data_types/#llmengine.data_types.CompletionStreamOutput.num_completion_tokens","title":"num_completion_tokens class-attribute instance-attribute","text":"
    num_completion_tokens: Optional[int] = None\n

    Number of tokens in the completion.

    "},{"location":"api/data_types/#llmengine.CompletionSyncV1Response","title":"CompletionSyncV1Response","text":"

    Bases: BaseModel

    Response object for a synchronous prompt completion.

    "},{"location":"api/data_types/#llmengine.data_types.CompletionSyncV1Response.output","title":"output instance-attribute","text":"
    output: CompletionOutput\n

    Completion output.

    "},{"location":"api/data_types/#llmengine.data_types.CompletionSyncV1Response.request_id","title":"request_id instance-attribute","text":"
    request_id: str\n

    Unique ID of request.

    "},{"location":"api/data_types/#llmengine.CompletionStreamV1Response","title":"CompletionStreamV1Response","text":"

    Bases: BaseModel

    Response object for a stream prompt completion task.

    "},{"location":"api/data_types/#llmengine.data_types.CompletionStreamV1Response.output","title":"output class-attribute instance-attribute","text":"
    output: Optional[CompletionStreamOutput] = None\n

    Completion output.

    "},{"location":"api/data_types/#llmengine.data_types.CompletionStreamV1Response.request_id","title":"request_id instance-attribute","text":"
    request_id: str\n

    Unique ID of request.

    "},{"location":"api/data_types/#llmengine.CreateFineTuneRequest","title":"CreateFineTuneRequest","text":"

    Bases: BaseModel

    Request object for creating a FineTune.

    "},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneRequest.model","title":"model class-attribute instance-attribute","text":"
    model: str = Field(\n...,\ndescription=\"Identifier of base model to train from.\",\n)\n

    Identifier of base model to train from.

    "},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneRequest.training_file","title":"training_file class-attribute instance-attribute","text":"
    training_file: str = Field(\n...,\ndescription=\"Path to file of training dataset. Dataset must be a csv with columns 'prompt' and 'response'.\",\n)\n

    Path to file of training dataset. Dataset must be a csv with columns 'prompt' and 'response'.

    "},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneRequest.validation_file","title":"validation_file class-attribute instance-attribute","text":"
    validation_file: Optional[str] = Field(\ndefault=None,\ndescription=\"Path to file of validation dataset. Has the same format as training_file. If not provided, we will generate a split from the training dataset.\",\n)\n

    Path to file of validation dataset. Has the same format as training_file. If not provided, we will generate a split from the training dataset.

    "},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneRequest.hyperparameters","title":"hyperparameters class-attribute instance-attribute","text":"
    hyperparameters: Optional[Dict[str, Any]] = Field(\ndefault=None,\ndescription=\"Hyperparameters to pass in to training job.\",\n)\n

    Hyperparameters to pass in to training job.

    "},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneRequest.suffix","title":"suffix class-attribute instance-attribute","text":"
    suffix: Optional[str] = Field(\ndefault=None,\ndescription=\"Optional user-provided identifier suffix for the fine-tuned model.\",\n)\n

    Optional user-provided identifier suffix for the fine-tuned model.

    "},{"location":"api/data_types/#llmengine.CreateFineTuneResponse","title":"CreateFineTuneResponse","text":"

    Bases: BaseModel

    Response object for creating a FineTune.

    "},{"location":"api/data_types/#llmengine.data_types.CreateFineTuneResponse.fine_tune_id","title":"fine_tune_id class-attribute instance-attribute","text":"
    fine_tune_id: str = Field(\n..., description=\"ID of the created fine-tuning job.\"\n)\n

    The ID of the FineTune.

    "},{"location":"api/data_types/#llmengine.GetFineTuneResponse","title":"GetFineTuneResponse","text":"

    Bases: BaseModel

    Response object for retrieving a FineTune.

    "},{"location":"api/data_types/#llmengine.data_types.GetFineTuneResponse.fine_tune_id","title":"fine_tune_id class-attribute instance-attribute","text":"
    fine_tune_id: str = Field(\n..., description=\"ID of the requested job.\"\n)\n

    The ID of the FineTune.

    "},{"location":"api/data_types/#llmengine.data_types.GetFineTuneResponse.status","title":"status class-attribute instance-attribute","text":"
    status: BatchJobStatus = Field(\n..., description=\"Status of the requested job.\"\n)\n

    The status of the FineTune job.

    "},{"location":"api/data_types/#llmengine.ListFineTunesResponse","title":"ListFineTunesResponse","text":"

    Bases: BaseModel

    Response object for listing FineTunes.

    "},{"location":"api/data_types/#llmengine.data_types.ListFineTunesResponse.jobs","title":"jobs class-attribute instance-attribute","text":"
    jobs: List[GetFineTuneResponse] = Field(\n...,\ndescription=\"List of fine-tuning jobs and their statuses.\",\n)\n

    A list of FineTunes, represented as GetFineTuneResponses.

    "},{"location":"api/data_types/#llmengine.CancelFineTuneResponse","title":"CancelFineTuneResponse","text":"

    Bases: BaseModel

    Response object for cancelling a FineTune.

    "},{"location":"api/data_types/#llmengine.data_types.CancelFineTuneResponse.success","title":"success class-attribute instance-attribute","text":"
    success: bool = Field(\n..., description=\"Whether cancellation was successful.\"\n)\n

    Whether the cancellation succeeded.

    "},{"location":"api/data_types/#llmengine.GetLLMEndpointResponse","title":"GetLLMEndpointResponse","text":"

    Bases: BaseModel

    Response object for retrieving a Model.

    "},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.id","title":"id class-attribute instance-attribute","text":"
    id: str = Field(\ndescription=\"The autogenerated ID of the Launch endpoint.\"\n)\n

    The autogenerated ID of the Launch endpoint.

    "},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.name","title":"name class-attribute instance-attribute","text":"
    name: str = Field(\ndescription=\"The name of the Launch endpoint.\"\n)\n

    The name of the Launch endpoint.

    "},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.model_name","title":"model_name class-attribute instance-attribute","text":"
    model_name: str = Field(\ndescription=\"The name of the model.\"\n)\n

    The name of the model.

    "},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.source","title":"source class-attribute instance-attribute","text":"
    source: LLMSource = Field(\ndescription=\"The source of the model.\"\n)\n

    The source of the model.

    "},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.inference_framework","title":"inference_framework class-attribute instance-attribute","text":"
    inference_framework: LLMInferenceFramework = Field(\ndescription=\"The inference framework used by the endpoint.\"\n)\n

    The inference framework used by the endpoint.

    "},{"location":"api/data_types/#llmengine.data_types.GetLLMEndpointResponse.num_shards","title":"num_shards class-attribute instance-attribute","text":"
    num_shards: int = Field(description=\"The number of shards.\")\n

    The number of shards.

    "},{"location":"api/data_types/#llmengine.ListLLMEndpointsResponse","title":"ListLLMEndpointsResponse","text":"

    Bases: BaseModel

    Response object for listing Models.

    "},{"location":"api/data_types/#llmengine.data_types.ListLLMEndpointsResponse.model_endpoints","title":"model_endpoints class-attribute instance-attribute","text":"
    model_endpoints: List[GetLLMEndpointResponse] = Field(\n..., description=\"The list of LLM endpoints.\"\n)\n

    A list of Models, represented as GetLLMEndpointResponses.

    "},{"location":"api/data_types/#llmengine.DeleteLLMEndpointResponse","title":"DeleteLLMEndpointResponse","text":"

    Bases: BaseModel

    Response object for deleting a Model.

    "},{"location":"api/data_types/#llmengine.data_types.DeleteLLMEndpointResponse.deleted","title":"deleted class-attribute instance-attribute","text":"
    deleted: bool = Field(\n..., description=\"Whether deletion was successful.\"\n)\n

    Whether the deletion succeeded.

    "},{"location":"api/error_handling/","title":"Error handling","text":"

    LLM Engine uses conventional HTTP response codes to indicate the success or failure of an API request. In general: codes in the 2xx range indicate success. Codes in the 4xx range indicate an error with the request given the information provided (e.g. a given Model was not found, or an invalid temperature was specified). Codes in the 5xx range indicate an error with the LLM Engine servers.

    In the Python client, errors are presented via a set of corresponding Exception classes, which should be caught and handled by the user accordingly.
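    For example, here is a sketch of catching the exception classes documented below, assuming they are importable from llmengine.errors as their module paths suggest.

    from llmengine import Completion
    from llmengine.errors import (
        BadRequestError,
        RateLimitExceededError,
        ServerError,
    )

    try:
        response = Completion.create(model="llama-7b", prompt="Hello, my name is")
        print(response.outputs[0].text)
    except BadRequestError as e:
        print(f"Invalid inputs, do not retry unchanged: {e}")
    except RateLimitExceededError as e:
        print(f"Rate limited, retry with exponential backoff: {e}")
    except ServerError as e:
        print(f"Server-side error: {e}")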

    "},{"location":"api/error_handling/#llmengine.errors.BadRequestError","title":"BadRequestError","text":"
    BadRequestError(message: str)\n

    Bases: Exception

    Corresponds to HTTP 400. Indicates that the request had inputs that were invalid. The user should not attempt to retry the request without changing the inputs.

    "},{"location":"api/error_handling/#llmengine.errors.UnauthorizedError","title":"UnauthorizedError","text":"
    UnauthorizedError(message: str)\n

    Bases: Exception

    Corresponds to HTTP 401. This means that no valid API key was provided.

    "},{"location":"api/error_handling/#llmengine.errors.NotFoundError","title":"NotFoundError","text":"
    NotFoundError(message: str)\n

    Bases: Exception

    Corresponds to HTTP 404. This means that the resource (e.g. a Model, FineTune, etc.) could not be found. Note that this can also be returned in some cases where the object might exist, but the user does not have access to the object. This is done to avoid leaking information about the existence or nonexistence of said object that the user does not have access to.

    "},{"location":"api/error_handling/#llmengine.errors.RateLimitExceededError","title":"RateLimitExceededError","text":"
    RateLimitExceededError(message: str)\n

    Bases: Exception

    Corresponds to HTTP 429. Too many requests hit the API too quickly. We recommend an exponential backoff for retries.

    "},{"location":"api/error_handling/#llmengine.errors.ServerError","title":"ServerError","text":"
    ServerError(status_code: int, message: str)\n

    Bases: Exception

    Corresponds to HTTP 5xx errors on the server.

    "},{"location":"api/langchain/","title":"\ud83e\udd9c Langchain","text":"

    Coming soon!

    "},{"location":"api/python_client/","title":"\ud83d\udc0d Python Client API Reference","text":""},{"location":"api/python_client/#llmengine.Completion","title":"Completion","text":"

    Bases: APIEngine

    Completion API. This API is used to generate text completions.

    Language Models are trained to understand natural language and provide text outputs as a response to their inputs. The inputs are called prompts and outputs are referred to as completions. LLMs take the input prompts and chunk them into smaller units called tokens to process and generate language. Tokens may include trailing spaces and even sub-words; this process is language dependent.

    The Completions API can be run either synchronously or asynchronously (via Python asyncio); for each of these modes, you can also choose whether to stream token responses.

    "},{"location":"api/python_client/#llmengine.completion.Completion.create","title":"create classmethod","text":"
    create(\nmodel: str,\nprompt: str,\nmax_new_tokens: int = 20,\ntemperature: float = 0.2,\ntimeout: int = 10,\nstream: bool = False,\n) -> Union[\nCompletionSyncV1Response,\nIterator[CompletionStreamV1Response],\n]\n

    Creates a completion for the provided prompt and parameters synchronously.

    Parameters:

    Name Type Description Default model str

    Name of the model to use. See Model Zoo for a list of Models that are supported.

    required prompt str

    The prompt to generate completions for, encoded as a string.

    required max_new_tokens int

    The maximum number of tokens to generate in the completion.

    The token count of your prompt plus max_new_tokens cannot exceed the model's context length. See Model Zoo for information on each supported model's context length.

    20 temperature float

    What sampling temperature to use, in the range (0, 1]. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

    0.2 timeout int

    Timeout in seconds. This is the maximum amount of time you are willing to wait for a response.

    10 stream bool

    Whether to stream the response. If true, the return type is an Iterator[CompletionStreamV1Response]. Otherwise, the return type is a CompletionSyncV1Response. When streaming, tokens will be sent as data-only server-sent events.

    False

    Returns:

    Name Type Description response Union[CompletionSyncV1Response, AsyncIterable[CompletionStreamV1Response]]

    The generated response (if streaming=False) or iterator of response chunks (if streaming=True)

    Example request without token streaming
    from llmengine import Completion\nresponse = Completion.create(\nmodel=\"llama-7b\",\nprompt=\"Hello, my name is\",\nmax_new_tokens=10,\ntemperature=0.2,\n)\nprint(response.json())\n
    JSON Response
    {\n\"request_id\": \"0123456789\",\n\"outputs\":\n[\n{\n\"text\": \"_______ and I am a _______\",\n\"num_completion_tokens\": 10\n}\n],\n\"traceback\": null\n}\n
    Example request with token streaming
    from llmengine import Completion\nstream = Completion.create(\nmodel=\"llama-7b\",\nprompt=\"why is the sky blue?\",\nmax_new_tokens=5,\ntemperature=0.2,\nstream=True,\n)\nfor response in stream:\nif response.output:\nprint(response.json())\n
    JSON responses
    {\"request_id\": \"0123456789\", \"output\": {\"text\": \"\\n\", \"finished\": false, \"num_completion_tokens\": 1 } }\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \"I\", \"finished\": false, \"num_completion_tokens\": 2 } }\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \" don\", \"finished\": false, \"num_completion_tokens\": 3 } }\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \"\u2019\", \"finished\": false, \"num_completion_tokens\": 4 } }\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \"t\", \"finished\": true, \"num_completion_tokens\": 5 } }\n
    "},{"location":"api/python_client/#llmengine.completion.Completion.acreate","title":"acreate async classmethod","text":"
    acreate(\nmodel: str,\nprompt: str,\nmax_new_tokens: int = 20,\ntemperature: float = 0.2,\ntimeout: int = 10,\nstream: bool = False,\n) -> Union[\nCompletionSyncV1Response,\nAsyncIterable[CompletionStreamV1Response],\n]\n

    Creates a completion for the provided prompt and parameters asynchronously (with asyncio).

    Parameters:

    Name Type Description Default model str

    Name of the model to use. See Model Zoo for a list of Models that are supported.

    required prompt str

    The prompt to generate completions for, encoded as a string.

    required max_new_tokens int

    The maximum number of tokens to generate in the completion.

    The token count of your prompt plus max_new_tokens cannot exceed the model's context length. See Model Zoo for information on each supported model's context length.

    20 temperature float

    What sampling temperature to use, in the range (0, 1]. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

    0.2 timeout int

    Timeout in seconds. This is the maximum amount of time you are willing to wait for a response.

    10 stream bool

    Whether to stream the response. If true, the return type is an Iterator[CompletionStreamV1Response]. Otherwise, the return type is a CompletionSyncV1Response. When streaming, tokens will be sent as data-only server-sent events.

    False

    Returns:

    Name Type Description response Union[CompletionSyncV1Response, AsyncIterable[CompletionStreamV1Response]]

    The generated response (if streaming=False) or iterator of response chunks (if streaming=True)

    Example without token streaming
    import asyncio\nfrom llmengine import Completion\nasync def main():\nresponse = await Completion.acreate(\nmodel=\"llama-7b\",\nprompt=\"Hello, my name is\",\nmax_new_tokens=10,\ntemperature=0.2,\n)\nprint(response.json())\nasyncio.run(main())\n
    JSON response
    {\n\"request_id\": \"b1b2c3d4e5f6g7h8i9j0\",\n\"outputs\":\n[\n{\n\"text\": \"_______, and I am a _____\",\n\"num_completion_tokens\": 10\n}\n],\n}\n
    Example with token streaming
    import asyncio\nfrom llmengine import Completion\nasync def main():\nstream = await Completion.acreate(\nmodel=\"llama-7b\",\nprompt=\"why is the sky blue?\",\nmax_new_tokens=5,\ntemperature=0.2,\nstream=True,\n)\nasync for response in stream:\nif response.output:\nprint(response.json())\nasyncio.run(main())\n
    JSON responses
    {\"request_id\": \"0123456789\", \"output\": {\"text\": \"\\n\", \"finished\": false, \"num_completion_tokens\": 1}}\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \"I\", \"finished\": false, \"num_completion_tokens\": 2}}\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \" think\", \"finished\": false, \"num_completion_tokens\": 3}}\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \" the\", \"finished\": false, \"num_completion_tokens\": 4}}\n{\"request_id\": \"0123456789\", \"output\": {\"text\": \" sky\", \"finished\": true, \"num_completion_tokens\": 5}}\n
    "},{"location":"api/python_client/#llmengine.FineTune","title":"FineTune","text":"

    Bases: APIEngine

    FineTune API. This API is used to fine-tune models.

    Fine-tuning is a process where the LLM is further trained on a task-specific dataset, allowing the model to adjust its parameters to better align with the task at hand. Fine-tuning involves the supervised training phase, where prompt/response pairs are provided to optimize the performance of the LLM.

    Scale LLM Engine provides APIs to create fine-tunes on a base model with training & validation datasets. APIs are also provided to get, list, and cancel fine-tuning jobs.

    Creating a fine-tune will end with the creation of a Model, which you can view using Model.get(model_name) or delete using Model.delete(model_name).

    "},{"location":"api/python_client/#llmengine.fine_tuning.FineTune.create","title":"create classmethod","text":"
    create(\nmodel: str,\ntraining_file: str,\nvalidation_file: Optional[str] = None,\nhyperparameters: Optional[Dict[str, str]] = None,\nsuffix: Optional[str] = None,\n) -> CreateFineTuneResponse\n

    Creates a job that fine-tunes a specified model from a given dataset.

    Parameters:

    Name Type Description Default model `str`

    The name of the base model to fine-tune. See Model Zoo for the list of available models to fine-tune.

    required training_file `str`

    Path to file of training dataset

    required validation_file `Optional[str]`

    Path to file of validation dataset

    None hyperparameters `Optional[Dict[str, str]]`

    Hyperparameters to pass in to the training job

    None suffix `Optional[str]`

    A string that will be added to your fine-tuned model name.

    None

    Returns:

    Name Type Description CreateFineTuneResponse CreateFineTuneResponse

    an object that contains the ID of the created fine-tuning job

    The model is the name of the base model to fine-tune (see Model Zoo for available models). The training file should consist of prompt and response pairs. Your data must be formatted as a CSV file that includes two columns: prompt and response. A maximum of 100,000 rows of data is currently supported. At least 200 rows of data are recommended to start seeing benefits from fine-tuning.

    Here is an example script to create a 5-row CSV of properly formatted data for fine-tuning an airline question answering bot:

    import csv\n# Define data\ndata = [\n(\"What is your policy on carry-on luggage?\", \"Our policy allows each passenger to bring one piece of carry-on luggage and one personal item such as a purse or briefcase. The maximum size for carry-on luggage is 22 x 14 x 9 inches.\"),\n(\"How can I change my flight?\", \"You can change your flight through our website or mobile app. Go to 'Manage my booking' section, enter your booking reference and last name, then follow the prompts to change your flight.\"),\n(\"What meals are available on my flight?\", \"We offer a variety of meals depending on the flight's duration and route. These can range from snacks and light refreshments to full-course meals on long-haul flights. Specific meal options can be viewed during the booking process.\"),\n(\"How early should I arrive at the airport before my flight?\", \"We recommend arriving at least two hours before domestic flights and three hours before international flights.\"),\n(\"Can I select my seat in advance?\", \"Yes, you can select your seat during the booking process or afterwards via the 'Manage my booking' section on our website or mobile app.\"),\n]\n# Write data to a CSV file\nwith open('customer_service_data.csv', 'w', newline='') as file:\nwriter = csv.writer(file)\nwriter.writerow([\"prompt\", \"response\"])\nwriter.writerows(data)\n
    Example code for fine-tuning
    from llmengine import FineTune\nresponse = FineTune.create(\nmodel=\"llama-7b\",\ntraining_file=\"s3://my-bucket/path/to/training-file.csv\",\n)\nprint(response.json())\n
    JSON Response
    {\n\"fine_tune_id\": \"ft_abc123\"\n}\n
    "},{"location":"api/python_client/#llmengine.fine_tuning.FineTune.get","title":"get classmethod","text":"
    get(fine_tune_id: str) -> GetFineTuneResponse\n

    Get status of a fine-tuning job

    Parameters:

    Name Type Description Default fine_tune_id `str`

    ID of the fine-tuning job

    required

    Returns:

    Name Type Description GetFineTuneResponse GetFineTuneResponse

    an object that contains the ID and status of the requested job

    Example
    from llmengine import FineTune\nresponse = FineTune.get(\nfine_tune_id=\"ft_abc123\",\n)\nprint(response.json())\n
    JSON Response
    {\n\"fine_tune_id\": \"ft_abc123\",\n\"status\": \"RUNNING\"\n}\n
    "},{"location":"api/python_client/#llmengine.fine_tuning.FineTune.list","title":"list classmethod","text":"
    list() -> ListFineTunesResponse\n

    List fine-tuning jobs

    Returns:

    Name Type Description ListFineTunesResponse ListFineTunesResponse

    an object that contains a list of all fine-tuning jobs and their statuses

    Example
    from llmengine import FineTune\nresponse = FineTune.list()\nprint(response.json())\n
    JSON Response
    {\n\"jobs\": [\n{\n\"fine_tune_id\": \"ft_abc123\",\n\"status\": \"RUNNING\"\n},\n{\n\"fine_tune_id\": \"ft_def456\",\n\"status\": \"SUCCESS\"\n}\n]\n}\n
    "},{"location":"api/python_client/#llmengine.fine_tuning.FineTune.cancel","title":"cancel classmethod","text":"
    cancel(fine_tune_id: str) -> CancelFineTuneResponse\n

    Cancel a fine-tuning job

    Parameters:

    Name Type Description Default fine_tune_id `str`

    ID of the fine-tuning job

    required

    Returns:

    Name Type Description CancelFineTuneResponse CancelFineTuneResponse

    an object that contains whether the cancellation was successful

    Example
    from llmengine import FineTune\nresponse = FineTune.cancel(fine_tune_id=\"ft_abc123\")\nprint(response.json())\n
    JSON Response
    {\n\"success\": true\n}\n
    "},{"location":"api/python_client/#llmengine.Model","title":"Model","text":"

    Bases: APIEngine

    Model API. This API is used to get, list, delete, and (in the self-hosted case) create models. When using Scale Spellbook, create models using FineTune.create().

    See Model Zoo for the list of publicly available models.

    "},{"location":"api/python_client/#llmengine.model.Model.get","title":"get classmethod","text":"
    get(model_name: str) -> GetLLMEndpointResponse\n

    Get information about an LLM model endpoint.

    Parameters:

    Name Type Description Default model_name `str`

    Name of the model

    required

    Returns:

    Name Type Description GetLLMEndpointResponse GetLLMEndpointResponse

    object representing the LLM endpoint and configurations

    Example
    from llmengine import Model\nresponse = Model.get(\"llama-7b.suffix.2023-07-18-12-00-00\")\nprint(response.json())\n
    JSON Response
    {\n\"id\": \"end_abc123\",\n\"name\": \"llama-7b.suffix.2023-07-18-12-00-00\",\n\"model_name\": \"llama-7b\",\n\"source\": \"hugging_face\",\n\"inference_framework\": \"text_generation_inference\",\n\"num_shards\": 4\n}\n
    "},{"location":"api/python_client/#llmengine.model.Model.list","title":"list classmethod","text":"
    list() -> ListLLMEndpointsResponse\n

    List LLM model endpoints available to call inference on. This includes publicly available endpoints as well as your fine-tuned model endpoints.

    Returns:

    Name Type Description ListLLMEndpointsResponse ListLLMEndpointsResponse

    list of model endpoints

    Example
    from llmengine import Model\nresponse = Model.list()\nprint(response.json())\n
    JSON Response
    {\n\"model_endpoints\": [\n{\n\"id\": \"end_abc123\",\n\"name\": \"llama-7b\",\n\"model_name\": \"llama-7b\",\n\"source\": \"hugging_face\",\n\"inference_framework\": \"text_generation_inference\",\n\"num_shards\": 4\n},\n{\n\"id\": \"end_def456\",\n\"name\": \"llama-13b-deepspeed-sync\",\n\"model_name\": \"llama-13b-deepspeed-sync\",\n\"source\": \"hugging_face\",\n\"inference_framework\": \"deepspeed\",\n\"num_shards\": 4\n},\n{\n\"id\": \"end_ghi789\",\n\"name\": \"falcon-40b\",\n\"model_name\": \"falcon-40b\",\n\"source\": \"hugging_face\",\n\"inference_framework\": \"text_generation_inference\",\n\"num_shards\": 4\n},\n{\n\"id\": \"end_jkl012\",\n\"name\": \"mpt-7b-instruct\",\n\"model_name\": \"mpt-7b-instruct\",\n\"source\": \"hugging_face\",\n\"inference_framework\": \"text_generation_inference\",\n\"num_shards\": 4\n}\n]\n}\n
    "},{"location":"api/python_client/#llmengine.model.Model.delete","title":"delete classmethod","text":"
    delete(model_name: str) -> DeleteLLMEndpointResponse\n

    Deletes an LLM model endpoint.

    Parameters:

    Name Type Description Default model_name `str`

    Name of the model

    required

    Returns:

    Name Type Description response DeleteLLMEndpointResponse

    whether the model was successfully deleted

    Example
    from llmengine import Model\nresponse = Model.delete(\"llama-7b.suffix.2023-07-18-12-00-00\")\nprint(response.json())\n
    JSON Response
    {\n\"deleted\": true\n}\n
    "},{"location":"guides/completions/","title":"Completions","text":"

    Language Models are trained to understand natural language and provide text outputs as a response to their inputs. The inputs are called prompts and outputs are referred to as completions. LLMs take the input prompts and chunk them into smaller units called tokens to process and generate language. Tokens may include trailing spaces and even sub-words; this process is language dependent.

    Scale LLM Engine provides access to open source language models (see Model Zoo) that can be used for producing completions to prompts.

    "},{"location":"guides/completions/#completion-api-call","title":"Completion API call","text":"

    An example API call looks as follows:

    from llmengine import Completion\nresponse = Completion.create(\nmodel=\"llama-7b\",\nprompt=\"Hello, my name is\",\nmax_new_tokens=10,\ntemperature=0.2,\n)\n

    The model is the LLM to be used (see Model Zoo). The prompt is the main input for the LLM to respond to. The max_new_tokens parameter is the maximum number of tokens to generate in the completion. The temperature is the sampling temperature to use. Higher values make the output more random, while lower values will make it more focused and deterministic.

    See the full API reference documentation to learn more.

    "},{"location":"guides/completions/#completion-api-response","title":"Completion API response","text":"

    An example Completion API response looks as follows:

    {\n\"outputs\": [\n{\n\"text\": \"_______ and I am a _______\",\n\"num_completion_tokens\": 10\n}\n]\n}\n

    In Python, the response is of type CompletionSyncV1Response, which maps to the above JSON structure.

    print( response.outputs[0].text )\n
    "},{"location":"guides/completions/#token-streaming","title":"Token streaming","text":"

    The Completions API supports token streaming to reduce perceived latency for certain applications. When streaming, tokens will be sent as data-only server-sent events.

    To enable token streaming, pass stream=True to either Completion.create or Completion.acreate.

    An example of token streaming using the synchronous Completions API looks as follows:

    from llmengine import Completion\nstream = Completion.create(\nmodel=\"llama-7b\",\nprompt=\"why is the sky blue?\",\nmax_new_tokens=5,\ntemperature=0.2,\nstream=True,\n)\nfor response in stream:\nif response.output:\nprint(response.json())\n
    "},{"location":"guides/completions/#async-requests","title":"Async requests","text":"

    The Python client supports asyncio for creating Completions. Use Completion.acreate instead of Completion.create to utilize async processing. The function signatures are otherwise identical.

    An example of async Completions looks as follows:

    import asyncio\nfrom llmengine import Completion\nasync def main():\nresponse = await Completion.acreate(\nmodel=\"llama-7b\",\nprompt=\"Hello, my name is\",\nmax_new_tokens=10,\ntemperature=0.2,\n)\nprint(response.json())\nasyncio.run(main())\n
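    Because acreate is a coroutine, multiple completions can also be issued concurrently with asyncio.gather. A sketch:

    import asyncio
    from llmengine import Completion

    async def main():
        prompts = ["Hello, my name is", "The capital of France is"]
        # Fire both requests concurrently rather than one after another.
        responses = await asyncio.gather(
            *(
                Completion.acreate(model="llama-7b", prompt=p, max_new_tokens=10)
                for p in prompts
            )
        )
        for r in responses:
            print(r.outputs[0].text)

    asyncio.run(main())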
    "},{"location":"guides/completions/#which-model-should-i-use","title":"Which model should I use?","text":"

    See the Model Zoo for more information on best practices for which model to use for Completions.

    "},{"location":"guides/fine_tuning/","title":"Fine-tuning","text":"

    Learn how to customize your models on your data with fine-tuning.

    "},{"location":"guides/fine_tuning/#introduction","title":"Introduction","text":"

    Fine-tuning helps improve model performance by training on specific examples of prompts and desired responses. LLMs are initially trained on data collected from the entire internet. With fine-tuning, LLMs can be optimized to perform better in a specific domain by learning from examples for that domain. Smaller LLMs that have been fine-tuned on a specific use case often outperform larger ones that were trained more generally.

    Fine-tuning allows for:

    1. Higher quality results than prompt engineering alone
    2. Cost savings through shorter prompts
    3. The ability to reach equivalent accuracy with a smaller model
    4. Lower latency at inference time
    5. The chance to show an LLM more examples than can fit in a single context window

    LLM Engine's fine-tuning API lets you fine-tune various open source LLMs on your own data and then make inference calls to the resulting LLM. For more specific details, see the fine-tuning API Python client reference.

    "},{"location":"guides/fine_tuning/#producing-high-quality-data-for-fine-tuning","title":"Producing high quality data for fine-tuning","text":"

    The training data for fine-tuning should consist of prompt and response pairs.

    As a rule of thumb, you should expect to see linear improvements in your fine-tuned model's quality with each doubling of the dataset size. Having high-quality data is also essential to improving performance. For every linear increase in the error rate in your training data, you may encounter a roughly quadratic increase in your fine-tuned model's error rate: for example, doubling the fraction of erroneous rows in your training data could roughly quadruple the error rate of the fine-tuned model.

    High-quality data is critical to achieving improved model performance, and in several cases will require experts to generate and prepare it; the breadth and diversity of the data are just as important. Scale's Data Engine can help prepare such high-quality, diverse datasets - more information here.

    "},{"location":"guides/fine_tuning/#preparing-data","title":"Preparing data","text":"

    Your data must be formatted as a CSV file that includes two columns: prompt and response. A maximum of 100,000 rows of data is currently supported. At least 200 rows of data are recommended to start seeing benefits from fine-tuning.

    Here is an example script to create a 50-row CSV of properly formatted data for fine-tuning an airline question answering bot:

Creating a sample dataset

```python
import csv

# Define data: (prompt, response) pairs for a fictional airline's customer service
data = [
    ("What is your policy on carry-on luggage?", "Our policy allows each passenger to bring one piece of carry-on luggage and one personal item such as a purse or briefcase. The maximum size for carry-on luggage is 22 x 14 x 9 inches."),
    ("How can I change my flight?", "You can change your flight through our website or mobile app. Go to 'Manage my booking' section, enter your booking reference and last name, then follow the prompts to change your flight."),
    ("What meals are available on my flight?", "We offer a variety of meals depending on the flight's duration and route. These can range from snacks and light refreshments to full-course meals on long-haul flights. Specific meal options can be viewed during the booking process."),
    ("How early should I arrive at the airport before my flight?", "We recommend arriving at least two hours before domestic flights and three hours before international flights."),
    ("Can I select my seat in advance?", "Yes, you can select your seat during the booking process or afterwards via the 'Manage my booking' section on our website or mobile app."),
    ("What should I do if my luggage is lost?", "If your luggage is lost, please report this immediately at our 'Lost and Found' counter at the airport. We will assist you in tracking your luggage."),
    ("Do you offer special assistance for passengers with disabilities?", "Yes, we offer special assistance for passengers with disabilities. Please notify us of your needs at least 48 hours prior to your flight."),
    ("Can I bring my pet on the flight?", "Yes, we allow small pets in the cabin, and larger pets in the cargo hold. Please check our pet policy for more details."),
    ("What is your policy on flight cancellations?", "In case of flight cancellations, we aim to notify passengers as early as possible and offer either a refund or a rebooking on the next available flight."),
    ("Can I get a refund if I cancel my flight?", "Refunds depend on the type of ticket purchased. Please check our cancellation policy for details. Non-refundable tickets, however, are typically not eligible for refunds unless due to extraordinary circumstances."),
    ("How can I check-in for my flight?", "You can check-in for your flight either online, through our mobile app, or at the airport. Online and mobile app check-in opens 24 hours before departure and closes 90 minutes before."),
    ("Do you offer free meals on your flights?", "Yes, we serve free meals on all long-haul flights. For short-haul flights, we offer a complimentary drink and snack. Special meal requests should be made at least 48 hours before departure."),
    ("Can I use my electronic devices during the flight?", "Small electronic devices can be used throughout the flight in flight mode. Larger devices like laptops may be used above 10,000 feet."),
    ("How much baggage can I check-in?", "The checked baggage allowance depends on the class of travel and route. The details would be mentioned on your ticket, or you can check on our website."),
    ("How can I request for a wheelchair?", "To request a wheelchair or any other special assistance, please call our customer service at least 48 hours before your flight."),
    ("Do I get a discount for group bookings?", "Yes, we offer discounts on group bookings of 10 or more passengers. Please contact our group bookings team for more information."),
    ("Do you offer Wi-fi on your flights?", "Yes, we offer complimentary Wi-fi on select flights. You can check the availability during the booking process."),
    ("What is the minimum connecting time between flights?", "The minimum connecting time varies depending on the airport and whether your flight is international or domestic. Generally, it's recommended to allow at least 45-60 minutes for domestic connections and 60-120 minutes for international."),
    ("Do you offer duty-free shopping on international flights?", "Yes, we have a selection of duty-free items that you can pre-order on our website or purchase onboard on international flights."),
    ("Can I upgrade my ticket to business class?", "Yes, you can upgrade your ticket through the 'Manage my booking' section on our website or by contacting our customer service. The availability and costs depend on the specific flight."),
    ("Can unaccompanied minors travel on your flights?", "Yes, we do accommodate unaccompanied minors on our flights, with special services to ensure their safety and comfort. Please contact our customer service for more details."),
    ("What amenities do you provide in business class?", "In business class, you will enjoy additional legroom, reclining seats, premium meals, priority boarding and disembarkation, access to our business lounge, extra baggage allowance, and personalized service."),
    ("How much does extra baggage cost?", "Extra baggage costs vary based on flight route and the weight of the baggage. Please refer to our 'Extra Baggage' section on the website for specific rates."),
    ("Are there any specific rules for carrying liquids in carry-on?", "Yes, liquids carried in your hand luggage must be in containers of 100 ml or less and they should all fit into a single, transparent, resealable plastic bag of 20 cm x 20 cm."),
    ("What if I have a medical condition that requires special assistance during the flight?", "We aim to make the flight comfortable for all passengers. If you have a medical condition that may require special assistance, please contact our 'special services' team 48 hours before your flight."),
    ("What in-flight entertainment options are available?", "We offer a range of in-flight entertainment options including a selection of movies, TV shows, music, and games, available on your personal seat-back screen."),
    ("What types of payment methods do you accept?", "We accept credit/debit cards, PayPal, bank transfers, and various other forms of payment. The available options may vary depending on the country of departure."),
    ("How can I earn and redeem frequent flyer miles?", "You can earn miles for every journey you take with us or our partner airlines. These miles can be redeemed for flight tickets, upgrades, or various other benefits. To earn and redeem miles, you need to join our frequent flyer program."),
    ("Can I bring a stroller for my baby?", "Yes, you can bring a stroller for your baby. It can be checked in for free and will normally be given back to you at the aircraft door upon arrival."),
    ("What age does my child have to be to qualify as an unaccompanied minor?", "Children aged between 5 and 12 years who are traveling alone are considered unaccompanied minors. Our team provides special care for these children from departure to arrival."),
    ("What documents do I need to travel internationally?", "For international travel, you need a valid passport and may also require visas, depending on your destination and your country of residence. It's important to check the specific requirements before you travel."),
    ("What happens if I miss my flight?", "If you miss your flight, please contact our customer service immediately. Depending on the circumstances, you may be able to rebook on a later flight, but additional fees may apply."),
    ("Can I travel with my musical instrument?", "Yes, small musical instruments can be brought on board as your one carry-on item. Larger instruments must be transported in the cargo, or if small enough, a seat may be purchased for them."),
    ("Do you offer discounts for children or infants?", "Yes, children aged 2-11 traveling with an adult usually receive a discount on the fare. Infants under the age of 2 who do not occupy a seat can travel for a reduced fare or sometimes for free."),
    ("Is smoking allowed on your flights?", "No, all our flights are non-smoking for the comfort and safety of all passengers."),
    ("Do you have family seating?", "Yes, we offer the option to seat families together. You can select seats during booking or afterwards through the 'Manage my booking' section on the website."),
    ("Is there any discount for senior citizens?", "Some flights may offer a discount for senior citizens. Please check our website or contact customer service for accurate information."),
    ("What items are prohibited on your flights?", "Prohibited items include, but are not limited to, sharp objects, firearms, explosive materials, and certain chemicals. You can find a comprehensive list on our website under the 'Security Regulations' section."),
    ("Can I purchase a ticket for someone else?", "Yes, you can purchase a ticket for someone else. You'll need their correct name as it appears on their government-issued ID, and their correct travel dates."),
    ("What is the process for lost and found items on the plane?", "If you realize you forgot an item on the plane, report it as soon as possible to our lost and found counter. We will make every effort to locate and return your item."),
    ("Can I request a special meal?", "Yes, we offer a variety of special meals to accommodate dietary restrictions. Please request your preferred meal at least 48 hours prior to your flight."),
    ("Is there a weight limit for checked baggage?", "Yes, luggage weight limits depend on your ticket class and route. You can find the details on your ticket or by visiting our website."),
    ("Can I bring my sports equipment?", "Yes, certain types of sports equipment can be carried either as or in addition to your permitted baggage. Some equipment may require additional fees. It's best to check our policy on our website or contact us directly."),
    ("Do I need a visa to travel to certain countries?", "Yes, visa requirements depend on the country you are visiting and your nationality. We advise checking with the relevant embassy or consulate prior to travel."),
    ("How can I add extra baggage to my booking?", "You can add extra baggage to your booking through the 'Manage my booking' section on our website or by contacting our customer services."),
    ("Can I check-in at the airport?", "Yes, you can choose to check-in at the airport. However, we also offer online and mobile check-in, which may save you time."),
    ("How do I know if my flight is delayed or cancelled?", "In case of any changes to your flight, we will attempt to notify all passengers using the contact information given at the time of booking. You can also check your flight status on our website."),
    ("What is your policy on pregnant passengers?", "Pregnant passengers can travel up to the end of the 36th week for single pregnancies, and the end of the 32nd week for multiple pregnancies. We recommend consulting your doctor before any air travel."),
    ("Can children travel alone?", "Yes, children age 5 to 12 can travel alone as unaccompanied minors. We provide special care for these children. Please contact our customer service for more information."),
    ("How can I pay for my booking?", "You can pay for your booking using a variety of methods including credit and debit cards, PayPal, or bank transfers. The options may vary depending on the country of departure."),
]

# Write data to a CSV file with the required 'prompt' and 'response' columns
with open('customer_service_data.csv', 'w', newline='') as file:
    writer = csv.writer(file)
    writer.writerow(["prompt", "response"])
    writer.writerows(data)
```
    "},{"location":"guides/fine_tuning/#making-your-data-accessible-to-llm-engine","title":"Making your data accessible to LLM Engine","text":"

Currently, data needs to be uploaded to a publicly accessible web URL so that it can be read for fine-tuning; HTTP, HTTPS, and S3 URLs are supported. Support for privately sharing data with the LLM Engine API is coming shortly. For quick iteration, tools like Pastebin or GitHub Gists let you host your CSV files publicly. We created an example GitHub Gist you can see here. To use the gist, use the URL given when you click the "Raw" button.
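Before launching a fine-tune, it can help to confirm that your hosted file really is publicly readable. A minimal sketch using the `requests` library (the URL is a placeholder for your own raw gist or S3 link):

```python
import requests

# Placeholder URL; substitute the raw link to your hosted CSV.
url = "https://gist.githubusercontent.com/<user>/<gist-id>/raw/customer_service_data.csv"

response = requests.get(url, timeout=10)
response.raise_for_status()  # raises if the file is not publicly readable
print(response.text.splitlines()[0])  # expect the header row: prompt,response
```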

    "},{"location":"guides/fine_tuning/#launching-the-fine-tune","title":"Launching the fine-tune","text":"

    Once you have uploaded your data, you can use the LLM Engine API to launch a fine-tune. You will need to specify which base model to fine-tune, the locations of the training file and optional validation data file, an optional set of hyperparameters to customize the fine-tuning behavior, and an optional suffix to append to the name of the fine-tune.

    Create a fine-tune
```python
from llmengine import FineTune

response = FineTune.create(
    model="llama-7b",
    training_file="s3://my-bucket/path/to/training-file.csv",
)
print(response.json())
```
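A fuller call might also pass the optional arguments described above. This is a sketch, not a definitive signature: the validation file path, hyperparameter key, and suffix below are illustrative placeholders, and the accepted hyperparameters are listed in the FineTune API reference.

```python
from llmengine import FineTune

response = FineTune.create(
    model="llama-7b",
    training_file="s3://my-bucket/path/to/training-file.csv",
    # Optional arguments; values below are illustrative placeholders.
    validation_file="s3://my-bucket/path/to/validation-file.csv",
    hyperparameters={"epochs": 1},  # assumed key; see the API reference
    suffix="customer-service",
)
print(response.json())
```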

See the Model Zoo for the list of models that support fine-tuning.

Once the fine-tune is launched, you can also check its status.
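For example, a minimal status check might look like the following sketch. It assumes a `FineTune.get` method that accepts the `fine_tune_id` returned by `FineTune.create`; `ft_abc123` is a placeholder ID.

```python
from llmengine import FineTune

# Assumption: FineTune.get takes the fine_tune_id returned by FineTune.create
# and returns the job's current state; check the FineTune API reference.
response = FineTune.get(fine_tune_id="ft_abc123")
print(response.json())
```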

    "},{"location":"guides/fine_tuning/#making-inference-calls-to-your-fine-tune","title":"Making inference calls to your fine-tune","text":"

Once your fine-tune is finished, you will be able to start making inference requests to the model. You can use the fine_tune_id returned from your FineTune.create API call to reference your fine-tuned model in the Completions API. Alternatively, you can list available LLMs with Model.list to find the name of your fine-tuned model, then use that name in your completion requests. See the Completion API for more details.
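For instance, a quick way to look up the model name (a sketch; the shape of the response is documented under ListLLMEndpointsResponse in the data type reference):

```python
from llmengine import Model

# Lists the LLM endpoints visible to you, including fine-tuned models;
# the fine-tuned model's name can then be passed to Completion.create.
response = Model.list()
print(response.json())
```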

    Inference with a fine-tuned model
```python
from llmengine import Completion

response = Completion.create(
    model_name="ft_abc123",
    prompt="Do you offer in-flight Wi-fi?",
    max_new_tokens=100,
    temperature=0.2,
)
print(response.json())
```
    "},{"location":"guides/rate_limits/","title":"Overview","text":""},{"location":"guides/rate_limits/#what-are-rate-limits","title":"What are rate limits?","text":"

    A rate limit is a restriction that an API imposes on the number of times a user or client can access the server within a specified period of time.

    "},{"location":"guides/rate_limits/#how-do-i-know-if-i-am-rate-limited","title":"How do I know if I am rate limited?","text":"

Per standard HTTP practice, your request will receive a response with HTTP status code 429, Too Many Requests.

    "},{"location":"guides/rate_limits/#what-are-the-rate-limits-for-our-api","title":"What are the rate limits for our API?","text":"

The LLM Engine API is currently in preview mode, so we do not yet advertise specific rate limits. As the API moves toward a production release, we will update this section with concrete limits. For now, the API returns HTTP 429 on an as-needed basis.

    "},{"location":"guides/rate_limits/#error-mitigation","title":"Error mitigation","text":""},{"location":"guides/rate_limits/#retrying-with-exponential-backoff","title":"Retrying with exponential backoff","text":"

    One easy way to avoid rate limit errors is to automatically retry requests with a random exponential backoff. Retrying with exponential backoff means performing a short sleep when a rate limit error is hit, then retrying the unsuccessful request. If the request is still unsuccessful, the sleep length is increased and the process is repeated. This continues until the request is successful or until a maximum number of retries is reached. This approach has many benefits:

• Automatic retries mean you can recover from rate limit errors without crashes or missing data
• Exponential backoff means your first retries happen quickly, while you still benefit from longer delays if the first few attempts fail
• Adding random jitter to the delay helps prevent retries from all hitting at the same time
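To make the mechanism concrete before reaching for a library, here is a minimal hand-rolled sketch; the `max_retries` and `base_delay` values are illustrative.

```python
import random
import time

import llmengine


def completion_with_manual_backoff(max_retries=6, base_delay=1.0, **kwargs):
    """Retry a completion with exponential backoff plus random jitter."""
    for attempt in range(max_retries):
        try:
            return llmengine.Completion.create(**kwargs)
        except llmengine.error.RateLimitError:
            if attempt == max_retries - 1:
                raise  # give up after the final attempt
            # Sleep base_delay, 2x, 4x, ... plus up to 1s of random jitter.
            time.sleep(base_delay * 2**attempt + random.random())
```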

    Below are a few example solutions for Python that use exponential backoff.

    "},{"location":"guides/rate_limits/#example-1-using-the-tenacity-library","title":"Example #1: Using the tenacity library","text":"

Tenacity is an Apache 2.0 licensed, general-purpose retrying library, written in Python, that simplifies the task of adding retry behavior to just about anything. To add exponential backoff to your requests, you can use the tenacity.retry decorator. The example below uses the tenacity.wait_random_exponential function to add random exponential backoff to a request.

```python
import llmengine
from tenacity import (
    retry,
    stop_after_attempt,
    wait_random_exponential,
)  # for exponential backoff


@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def completion_with_backoff(**kwargs):
    return llmengine.Completion.create(**kwargs)


completion_with_backoff(model="llama-7b", prompt="Why is the sky blue?")
```
    "},{"location":"guides/rate_limits/#example-2-using-the-backoff-library","title":"Example #2: Using the backoff library","text":"

Another Python library that provides function decorators for backoff and retry is backoff:

```python
import llmengine
import backoff


@backoff.on_exception(backoff.expo, llmengine.error.RateLimitError)
def completions_with_backoff(**kwargs):
    return llmengine.Completion.create(**kwargs)


completions_with_backoff(model="llama-7b", prompt="Why is the sky blue?")
```
    "},{"location":"guides/token_streaming/","title":"Token streaming","text":"

The Completions APIs support a stream boolean parameter that, when True, returns a streamed response of token-by-token server-sent events (SSEs) rather than waiting for the full response once model generation has finished. This reduces the latency before you start receiving output.

The response will consist of SSEs of the form {"token": dict, "generated_text": str | null, "details": dict | null}, where the dictionary for each token will contain log probability information in addition to the generated string; the generated_text field will be null for all but the last SSE, for which it will contain the full generated response.
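To consume the stream with the Python client, a minimal sketch: it assumes that Completion.create with stream=True yields one response per SSE, and that each streamed response carries an output with text and finished fields, as described in the data type reference. The model name is a placeholder.

```python
from llmengine import Completion

# Assumption: stream=True returns an iterable of per-token responses whose
# output.text holds the newly generated token; output may be empty for
# control events, hence the guard below.
stream = Completion.create(
    model_name="llama-7b",
    prompt="Why is the sky blue?",
    max_new_tokens=100,
    stream=True,
)
for response in stream:
    if response.output:
        print(response.output.text, end="", flush=True)
```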

    "}]} \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz index 8bc449a2e..180a7496c 100644 Binary files a/sitemap.xml.gz and b/sitemap.xml.gz differ