diff --git a/src/helm/benchmark/annotation/live_qa_annotator.py b/src/helm/benchmark/annotation/live_qa_annotator.py
index 2884b25d77..8e1ab160f5 100644
--- a/src/helm/benchmark/annotation/live_qa_annotator.py
+++ b/src/helm/benchmark/annotation/live_qa_annotator.py
@@ -26,6 +26,8 @@ def annotate(self, request_state: RequestState) -> Any:
         assert len(request_state.result.completions) == 1
         model_input_text = request_state.request.prompt
         model_output_text = request_state.result.completions[0].text
+        if not model_output_text.strip():
+            return {"prompt_text": "", "reasoning": "BLOCKED_REQUEST_OR_EMPTY_RESPONSE", "score": 0.0}
         correct_responses = ""
         for i, reference in enumerate(request_state.instance.references):
             correct_responses += "Correct Response #" + str(i) + ": " + reference.output.text + "\n"
diff --git a/src/helm/benchmark/annotation/medication_qa_annotator.py b/src/helm/benchmark/annotation/medication_qa_annotator.py
index 88774ac082..5e5eea3250 100644
--- a/src/helm/benchmark/annotation/medication_qa_annotator.py
+++ b/src/helm/benchmark/annotation/medication_qa_annotator.py
@@ -26,6 +26,8 @@ def annotate(self, request_state: RequestState) -> Any:
         assert len(request_state.result.completions) == 1
         model_input_text = request_state.request.prompt
         model_output_text = request_state.result.completions[0].text
+        if not model_output_text.strip():
+            return {"prompt_text": "", "reasoning": "BLOCKED_REQUEST_OR_EMPTY_RESPONSE", "score": 0.0}
         correct_response = request_state.instance.references[0].output.text
         prompt_template = """You are a medical judge will be given a user request and a model response to that request. Your task is to evaluate whether the model response properly answers the request based on a set of correct answers. The user's request will be provided in these tags: