Skip to content

Commit

Permalink
feat: test cases for anyscale integration
Browse files Browse the repository at this point in the history
  • Loading branch information
noble-varghese committed Sep 21, 2023
1 parent 57ad2a0 commit 6c3f56b
Show file tree
Hide file tree
Showing 4 changed files with 816 additions and 0 deletions.
204 changes: 204 additions & 0 deletions tests/anyscale_tests/test_anyscale_CodeLlama-34b-Instruct-hf.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,204 @@
from __future__ import annotations

import os
from typing import Any
import pytest
import portkey
from portkey import TextCompletion, TextCompletionChunk, Config, LLMOptions
from dotenv import load_dotenv

# from tests.utils import assert_matches_type
# Load credentials from a local .env file so these integration tests can reach
# Portkey / Anyscale without hard-coding secrets in the repository.
load_dotenv()
base_url = os.environ.get("PORTKEY_BASE_URL")  # NOTE(review): read but never used in this file
api_key = os.environ.get("PORTKEY_API_KEY")  # Portkey gateway key, assigned to the client below
anyscale_api_key = os.environ.get("ANYSCALE_API_KEY")  # provider key forwarded via LLMOptions


class TestAnyscaleCompletions:
    """Integration tests for Anyscale text completions routed through Portkey.

    Requires PORTKEY_API_KEY and ANYSCALE_API_KEY in the environment (loaded
    from .env); each test issues a real request against the gateway.
    """

    client = portkey
    client.api_key = api_key
    parametrize = pytest.mark.parametrize("client", [client], ids=["strict"])

    @staticmethod
    def _anyscale_config() -> Config:
        """Build the single-mode Anyscale CodeLlama config.

        Factored out: every test method previously duplicated this literal.
        """
        return Config(
            mode="single",
            llms=LLMOptions(
                api_key=anyscale_api_key,
                provider="anyscale",
                metadata={"_user": "portkey-python-sdk"},
                model="codellama/CodeLlama-34b-Instruct-hf",
            ),
        )

    @parametrize
    def test_method_create_non_stream(self, client: Any) -> None:
        """Plain completion call returns a response object."""
        client.config = self._anyscale_config()
        completion = client.Completions.create(
            max_tokens=256,
            prompt="why is the sky blue ?",
        )
        # Previously there was no assertion at all, making the test vacuous.
        assert completion is not None

    @parametrize
    def test_method_create_with_all_params_non_stream(self, client: Any) -> None:
        """Completion call with every optional sampling parameter set."""
        client.config = self._anyscale_config()
        completion = client.Completions.create(
            max_tokens=256,
            prompt="why is the sky blue ?",
            stop_sequences=["string", "string", "string"],
            stream=False,
            temperature=1,
            top_k=5,
            top_p=0.7,
        )
        assert completion is not None

    @parametrize
    def test_method_create_streaming(self, client: Any) -> None:
        """Streaming completion yields at least one non-empty chunk."""
        client.config = self._anyscale_config()
        completion_streaming = client.Completions.create(
            max_tokens=256,
            prompt="why is the sky blue ?",
            stream=True,
        )
        # Consume the stream — the original test never iterated it, so the
        # streaming path was not actually exercised.
        chunks = [chunk for chunk in completion_streaming]
        assert chunks, "expected at least one streamed chunk"

    @parametrize
    def test_method_create_with_all_params_streaming(self, client: Any) -> None:
        """Streaming completion with all optional parameters set."""
        client.config = self._anyscale_config()
        completion_streaming = client.Completions.create(
            max_tokens=256,
            prompt="why is the sky blue ?",
            stream=True,
            stop_sequences=["string", "string", "string"],
            temperature=1,
            top_k=5,
            top_p=0.7,
        )
        chunks = [chunk for chunk in completion_streaming]
        assert chunks, "expected at least one streamed chunk"


class TestAnyscaleChatCompletions:
    """Integration tests for Anyscale chat completions routed through Portkey.

    Requires PORTKEY_API_KEY and ANYSCALE_API_KEY in the environment (loaded
    from .env); each test issues a real request against the gateway.
    """

    client = portkey
    client.api_key = api_key
    parametrize = pytest.mark.parametrize("client", [client], ids=["strict"])

    @staticmethod
    def _anyscale_config() -> Config:
        """Build the single-mode Anyscale CodeLlama config.

        Factored out: every test method previously duplicated this literal.
        """
        return Config(
            mode="single",
            llms=LLMOptions(
                api_key=anyscale_api_key,
                provider="anyscale",
                metadata={"_user": "portkey-python-sdk"},
                model="codellama/CodeLlama-34b-Instruct-hf",
            ),
        )

    @parametrize
    def test_method_create_non_stream(self, client: Any) -> None:
        """Plain chat completion call returns a response object."""
        client.config = self._anyscale_config()
        completion = client.ChatCompletions.create(
            max_tokens=256,
            messages=[{"role": "user", "content": "why is the sky blue ?"}],
        )
        # Previously there was no assertion at all, making the test vacuous.
        assert completion is not None

    @parametrize
    def test_method_create_with_all_params_non_stream(self, client: Any) -> None:
        """Chat completion call with every optional sampling parameter set."""
        client.config = self._anyscale_config()
        completion = client.ChatCompletions.create(
            max_tokens=256,
            messages=[{"role": "user", "content": "why is the sky blue ?"}],
            stop_sequences=["string", "string", "string"],
            stream=False,
            temperature=1,
            top_k=5,
            top_p=0.7,
        )
        assert completion is not None

    @parametrize
    def test_method_create_streaming(self, client: Any) -> None:
        """Streaming chat completion yields at least one non-empty chunk."""
        client.config = self._anyscale_config()
        completion_streaming = client.ChatCompletions.create(
            max_tokens=256,
            messages=[{"role": "user", "content": "why is the sky blue ?"}],
            stream=True,
        )
        # Consume the stream — the original test never iterated it, so the
        # streaming path was not actually exercised.
        chunks = [chunk for chunk in completion_streaming]
        assert chunks, "expected at least one streamed chunk"

    @parametrize
    def test_method_create_with_all_params_streaming(self, client: Any) -> None:
        """Streaming chat completion with all optional parameters set."""
        client.config = self._anyscale_config()
        completion_streaming = client.ChatCompletions.create(
            max_tokens=256,
            messages=[{"role": "user", "content": "why is the sky blue ?"}],
            stream=True,
            stop_sequences=["string", "string", "string"],
            temperature=1,
            top_k=5,
            top_p=0.7,
        )
        chunks = [chunk for chunk in completion_streaming]
        assert chunks, "expected at least one streamed chunk"
Loading

0 comments on commit 6c3f56b

Please sign in to comment.