From b1bdcdd917f04b008fa1a11087a108ff04830d77 Mon Sep 17 00:00:00 2001
From: drisspg
Date: Sat, 12 Aug 2023 10:23:39 -0700
Subject: [PATCH] formatting

---
 .../window_services/httpmodel_window_service.py |  2 +-
 src/helm/proxy/clients/http_client.py           | 17 +++++------------
 2 files changed, 6 insertions(+), 13 deletions(-)

diff --git a/src/helm/benchmark/window_services/httpmodel_window_service.py b/src/helm/benchmark/window_services/httpmodel_window_service.py
index 020e1bf10d..c179fec8c8 100644
--- a/src/helm/benchmark/window_services/httpmodel_window_service.py
+++ b/src/helm/benchmark/window_services/httpmodel_window_service.py
@@ -24,4 +24,4 @@ def tokenizer_name(self) -> str:
 
     @property
     def prefix_token(self) -> str:
-        return self.end_of_text_token
\ No newline at end of file
+        return self.end_of_text_token
diff --git a/src/helm/proxy/clients/http_client.py b/src/helm/proxy/clients/http_client.py
index b2382ec26c..e7e4f1843a 100644
--- a/src/helm/proxy/clients/http_client.py
+++ b/src/helm/proxy/clients/http_client.py
@@ -68,14 +68,10 @@ def do_it():
                 response, cached = do_it(), False
 
             tokens = [
-                Token(
-                    text=token["text"], logprob=token["logprob"], top_logprobs=token["top_logprob"]
-                )
+                Token(text=token["text"], logprob=token["logprob"], top_logprobs=token["top_logprob"])
                 for token in response["tokens"]
             ]
-            completions = [
-                Sequence(text=response["text"], logprob=response["logprob"], tokens=tokens)
-            ]
+            completions = [Sequence(text=response["text"], logprob=response["logprob"], tokens=tokens)]
 
             return RequestResult(
                 success=True,
@@ -87,9 +83,7 @@ def do_it():
             )
         except requests.exceptions.RequestException as e:
             error: str = f"Request error: {e}"
-            return RequestResult(
-                success=False, cached=False, error=error, completions=[], embedding=[]
-            )
+            return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
 
     def tokenize(self, request: TokenizationRequest) -> TokenizationRequestResult:
         cache_key = asdict(request)
@@ -107,15 +101,14 @@ def do_it():
                 response.raise_for_status()
                 response_data = response.json()
                 return response_data
+
             if self.do_cache:
                 result, cached = self.cache.get(cache_key, wrap_request_time(do_it))
             else:
                 result, cached = do_it(), False
         except Exception as e:
             error: str = f"Local Model error: {e}"
-            return TokenizationRequestResult(
-                success=False, cached=False, error=error, text="", tokens=[]
-            )
+            return TokenizationRequestResult(success=False, cached=False, error=error, text="", tokens=[])
 
         return TokenizationRequestResult(
             success=True,
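
For context on what the reformatted comprehensions do: make_request converts the model server's JSON response into HELM's Token and Sequence objects, one Token per entry in response["tokens"] and a single Sequence wrapping the full text. Below is a minimal self-contained sketch of that conversion, assuming a response shaped like the fields the diff references ("text", "logprob", "tokens", "top_logprob"); the Token and Sequence dataclasses here are simplified stand-ins for HELM's real ones, and parse_response is a hypothetical helper for illustration only.

    from dataclasses import dataclass, field
    from typing import Any, Dict, List

    # Simplified stand-ins for HELM's Token and Sequence dataclasses (illustrative only).
    @dataclass
    class Token:
        text: str
        logprob: float
        top_logprobs: Dict[str, float]

    @dataclass
    class Sequence:
        text: str
        logprob: float
        tokens: List[Token] = field(default_factory=list)

    def parse_response(response: Dict[str, Any]) -> Sequence:
        # Mirrors the comprehensions in the patch: note that the server-side key
        # is "top_logprob" (singular) while the dataclass field is top_logprobs.
        tokens = [
            Token(text=t["text"], logprob=t["logprob"], top_logprobs=t["top_logprob"])
            for t in response["tokens"]
        ]
        return Sequence(text=response["text"], logprob=response["logprob"], tokens=tokens)

    # Example payload in the shape the client expects (values are made up):
    example = {
        "text": "hello world",
        "logprob": -2.5,
        "tokens": [
            {"text": "hello", "logprob": -1.0, "top_logprob": {"hello": -1.0}},
            {"text": " world", "logprob": -1.5, "top_logprob": {" world": -1.5}},
        ],
    }
    print(parse_response(example))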