Dependency injection for model deployments #1787

Status: Closed. The pull request wants to merge 4 commits; the diff below shows changes from all commits.
src/helm/benchmark/run_specs.py (8 changes: 2 additions & 6 deletions)
@@ -1,9 +1,8 @@
import importlib
import itertools
from typing import Any, Callable, List, Dict, Optional, Set, TypeVar

from helm.common.hierarchical_logger import hlog, htrack
from helm.common.object_spec import ObjectSpec
from helm.common.object_spec import ObjectSpec, get_class_by_name
from helm.benchmark.adaptation.adapters.adapter_factory import (
ADAPT_LANGUAGE_MODELING,
ADAPT_MULTIPLE_CHOICE_JOINT,
@@ -2290,10 +2289,7 @@ def alter_run_spec(run_spec: RunSpec) -> RunSpec:
add_to_stop_expander = AddToStopRunExpander(anthropic.HUMAN_PROMPT)
increase_max_tokens_expander = IncreaseMaxTokensRunExpander(value=AnthropicClient.ADDITIONAL_TOKENS)
# Get scenario tags
components = run_spec.scenario_spec.class_name.split(".")
class_name = components[-1]
module_name = ".".join(components[:-1])
cls = getattr(importlib.import_module(module_name), class_name)
cls = get_class_by_name(run_spec.scenario_spec.class_name)
scenario_tags: List[str] = cls.tags
# If the scenario is instruction, do not use PROMPT_ANSWER_START
if "instructions" in scenario_tags:
src/helm/common/object_spec.py (20 changes: 14 additions & 6 deletions)
@@ -1,6 +1,6 @@
import importlib
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple
from typing import Any, Dict, Optional, Tuple, Type


@dataclass(frozen=True)
@@ -17,16 +17,24 @@ def __hash__(self):
return hash((self.class_name, tuple((k, self.args[k]) for k in sorted(self.args.keys()))))


def create_object(spec: ObjectSpec, additional_args: Optional[Dict[str, Any]] = None):
"""Create the actual object given the `spec`."""
# TODO: Refactor other places that use this pattern.
components = spec.class_name.split(".")
def get_class_by_name(full_class_name: str) -> Type[Any]:
components = full_class_name.split(".")
class_name = components[-1]
module_name = ".".join(components[:-1])
cls = getattr(importlib.import_module(module_name), class_name)
return getattr(importlib.import_module(module_name), class_name)


def create_object(spec: ObjectSpec, additional_args: Optional[Dict[str, Any]] = None):
"""Create the actual object given the `spec`."""
Reviewer comment (Contributor): Elaborate on documentation of additional_args.

Reviewer comment (Contributor): Hmm...I think it might be cleaner / more modular to have a function that takes a spec and updates the spec with additional_args, so that the create_object function can stay the same. (A sketch of this alternative appears after this file's diff.)

cls = get_class_by_name(spec.class_name)
args = {}
args.update(spec.args)
if additional_args:
key_collisions = set(args.keys()) & set(additional_args.keys())
if key_collisions:
raise ValueError(
f"Argument name collisions {key_collisions} when trying to create object of class {spec.class_name}"
)
args.update(additional_args)
return cls(**args)
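
A minimal usage sketch of the two helpers above. The scenario class name and its subject argument are illustrative assumptions; any importable class that accepts keyword arguments behaves the same way.

from helm.common.object_spec import ObjectSpec, create_object, get_class_by_name

# Resolve a class from its fully qualified name.
cls = get_class_by_name("helm.benchmark.scenarios.mmlu_scenario.MMLUScenario")

# Build an object from a spec; additional_args are merged into spec.args.
spec = ObjectSpec(
    class_name="helm.benchmark.scenarios.mmlu_scenario.MMLUScenario",
    args={"subject": "anatomy"},
)
scenario = create_object(spec)

# A key that appears both in spec.args and in additional_args now raises
# ValueError("Argument name collisions ...") instead of silently overwriting.
create_object(spec, additional_args={"subject": "philosophy"})  # raises ValueError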

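A minimal sketch of the alternative the reviewer suggests: merge additional_args into a new spec up front so that create_object itself never changes. The helper name with_additional_args is hypothetical and not part of this PR.

from dataclasses import replace
from typing import Any, Dict

from helm.common.object_spec import ObjectSpec

def with_additional_args(spec: ObjectSpec, additional_args: Dict[str, Any]) -> ObjectSpec:
    # Hypothetical helper: return a copy of the spec with the extra args merged in,
    # raising on collisions just like the check added to create_object above.
    key_collisions = set(spec.args.keys()) & set(additional_args.keys())
    if key_collisions:
        raise ValueError(f"Argument name collisions {key_collisions} for {spec.class_name}")
    return replace(spec, args={**spec.args, **additional_args})

# Usage: create_object(with_additional_args(spec, {"cache_config": cache_config}))
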
src/helm/proxy/clients/auto_client.py (63 changes: 47 additions & 16 deletions)
@@ -1,13 +1,14 @@
import inspect
import os
from dataclasses import replace
from typing import Any, Dict, Mapping, Optional, TYPE_CHECKING
from typing import Any, Callable, Dict, Mapping, Optional, TYPE_CHECKING

from retrying import RetryError, Attempt

from helm.benchmark.model_deployment_registry import get_model_deployment
from helm.benchmark.model_deployment_registry import ModelDeployment, get_model_deployment
from helm.common.cache import CacheConfig, MongoCacheConfig, SqliteCacheConfig
from helm.common.hierarchical_logger import hlog
from helm.common.object_spec import create_object
from helm.common.object_spec import ObjectSpec, create_object, get_class_by_name
from helm.common.request import Request, RequestResult
from helm.common.tokenization_request import (
TokenizationRequest,
@@ -59,6 +60,48 @@ def _build_cache_config(self, organization: str) -> CacheConfig:
# TODO: Allow setting CacheConfig.follower_cache_path from a command line flag.
return SqliteCacheConfig(client_cache_path)

def _inject_init_args(self, spec: ObjectSpec, injectors: Dict[str, Callable[[], Any]]) -> ObjectSpec:
"""Return arguments needed by the class's __init__'s parameters.

This does a simple form of dependency injection. For each parameter in the class' __init__,
try to find a corresponding injector and call it to produce the argument value."""
cls = get_class_by_name(spec.class_name)
init_signature = inspect.signature(cls.__init__)
args = {}
args.update(spec.args)
missing_args = []
for parameter_name in init_signature.parameters.keys():
Reviewer comment (Contributor): I guess I'm confused what injectors are supposed to do... (A standalone sketch of the injector mechanism appears after this file's diff.)

if parameter_name == "self" or parameter_name in args:
continue
elif parameter_name in injectors:
args[parameter_name] = injectors[parameter_name]()
else:
missing_args.append(parameter_name)
if missing_args:
raise ValueError(f"Missing arguments {missing_args} for client_spec for {spec.class_name}")
return replace(spec, args=args)

def _create_client_for_model_deployment(self, model_deployment: ModelDeployment):
"""Create a client for the ModelDeployment."""

def get_api_key() -> str:
if "deployments" not in self.credentials:
raise AuthenticationError("Could not find key 'deployments' in credentials.conf")
deployment_api_keys = self.credentials["deployments"]
if model_deployment.name not in deployment_api_keys:
raise AuthenticationError(
f"Could not find key '{model_deployment.name}' under key 'deployments' in credentials.conf"
)
return deployment_api_keys[model_deployment.name]

def get_cache_config() -> CacheConfig:
organization: str = model_deployment.name.split("/")[0]
return self._build_cache_config(organization)

injectors = {"api_key": get_api_key, "cache_config": get_cache_config}
client_spec_with_injected_args = self._inject_init_args(model_deployment.client_spec, injectors)
return create_object(client_spec_with_injected_args)

def _get_client(self, model: str) -> Client:
"""Return a client based on the model, creating it if necessary."""
client: Optional[Client] = self.clients.get(model)
@@ -70,19 +113,7 @@ def _get_client(self, model: str) -> Client:
# TODO: Migrate all clients to use model deployments
model_deployment = get_model_deployment(model)
if model_deployment:
api_key = None
if "deployments" not in self.credentials:
raise AuthenticationError("Could not find key 'deployments' in credentials.conf")
deployment_api_keys = self.credentials["deployments"]
if model not in deployment_api_keys:
raise AuthenticationError(
f"Could not find key '{model}' under key 'deployments' in credentials.conf"
)
api_key = deployment_api_keys[model]
client = create_object(
model_deployment.client_spec, additional_args={"cache_config": cache_config, "api_key": api_key}
)

client = self._create_client_for_model_deployment(model_deployment)
elif get_huggingface_model_config(model):
from helm.proxy.clients.huggingface_client import HuggingFaceClient

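To make the injector mechanism concrete (see the reviewer question in the diff above), here is a self-contained sketch of the same pattern outside AutoClient. The client class, deployment name, credentials layout, and cache string are illustrative assumptions, not part of the PR. The idea: each injector is a zero-argument callable keyed by an __init__ parameter name, and it is only called when the spec does not already supply that argument.

import inspect
from dataclasses import dataclass, field, replace
from typing import Any, Callable, Dict


@dataclass(frozen=True)
class Spec:
    # Stand-in for ObjectSpec: a fully qualified class name plus constructor args.
    class_name: str
    args: Dict[str, Any] = field(default_factory=dict)


class ToyClient:
    # Illustrative client whose constructor needs an API key and a cache config.
    def __init__(self, api_key: str, cache_config: str, timeout: int = 10):
        self.api_key, self.cache_config, self.timeout = api_key, cache_config, timeout


def inject_init_args(cls: type, spec: Spec, injectors: Dict[str, Callable[[], Any]]) -> Spec:
    # For every __init__ parameter the spec does not cover, ask a matching injector
    # for a value. (The real _inject_init_args also raises if anything is still missing.)
    args = dict(spec.args)
    for name in inspect.signature(cls.__init__).parameters:
        if name == "self" or name in args:
            continue
        if name in injectors:
            args[name] = injectors[name]()  # dependency is produced lazily, on demand
    return replace(spec, args=args)


# Roughly what self.credentials could hold after parsing credentials.conf (layout assumed).
credentials = {"deployments": {"example-org/toy-model": "secret-key"}}

injectors = {
    "api_key": lambda: credentials["deployments"]["example-org/toy-model"],
    "cache_config": lambda: "sqlite:example-org.sqlite",  # stand-in for _build_cache_config()
}

spec = Spec(class_name="__main__.ToyClient", args={"timeout": 30})
filled = inject_init_args(ToyClient, spec, injectors)
client = ToyClient(**filled.args)  # api_key and cache_config came from the injectors
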
src/helm/proxy/clients/simple_client.py (28 changes: 12 additions & 16 deletions)
@@ -30,23 +30,19 @@ def make_request(self, request: Request) -> RequestResult:
"n": request.num_completions,
}

if request.model_engine == "model1":
def do_it():
return self.invoke_model1(raw_request)

def do_it():
return self.invoke_model1(raw_request)

cache_key = Client.make_cache_key(raw_request, request)
response, cached = self.cache.get(cache_key, wrap_request_time(do_it))
completions = [
Sequence(
text=text,
logprob=logprob,
tokens=[Token(text=text, logprob=logprob, top_logprobs=response["completions"])],
)
for text, logprob in response["completions"].items()
]
else:
raise ValueError(f"Invalid model: {request.model}")
cache_key = Client.make_cache_key(raw_request, request)
response, cached = self.cache.get(cache_key, wrap_request_time(do_it))
completions = [
Sequence(
text=text,
logprob=logprob,
tokens=[Token(text=text, logprob=logprob, top_logprobs=response["completions"])],
)
for text, logprob in response["completions"].items()
]

return RequestResult(
success=True,