diff --git a/opentrons-ai-client/src/molecules/FeedbackModal/index.tsx b/opentrons-ai-client/src/molecules/FeedbackModal/index.tsx index e65aa7a504c..fbb006bdbd4 100644 --- a/opentrons-ai-client/src/molecules/FeedbackModal/index.tsx +++ b/opentrons-ai-client/src/molecules/FeedbackModal/index.tsx @@ -10,14 +10,60 @@ import { } from '@opentrons/components' import { useAtom } from 'jotai' import { useTranslation } from 'react-i18next' -import { feedbackModalAtom } from '../../resources/atoms' +import { feedbackModalAtom, tokenAtom } from '../../resources/atoms' import { useState } from 'react' +import type { AxiosRequestConfig } from 'axios' +import { + STAGING_FEEDBACK_END_POINT, + PROD_FEEDBACK_END_POINT, + LOCAL_FEEDBACK_END_POINT, +} from '../../resources/constants' +import { useApiCall } from '../../resources/hooks' export function FeedbackModal(): JSX.Element { const { t } = useTranslation('protocol_generator') const [feedbackValue, setFeedbackValue] = useState('') const [, setShowFeedbackModal] = useAtom(feedbackModalAtom) + const [token] = useAtom(tokenAtom) + const { callApi } = useApiCall() + + const handleSendFeedback = async (): Promise<void> => { + try { + const headers = { + Authorization: `Bearer ${token}`, + 'Content-Type': 'application/json', + } + + const getEndpoint = (): string => { + switch (process.env.NODE_ENV) { + case 'production': + return PROD_FEEDBACK_END_POINT + case 'development': + return LOCAL_FEEDBACK_END_POINT + default: + return STAGING_FEEDBACK_END_POINT + } + } + + const url = getEndpoint() + + const config = { + url, + method: 'POST', + headers, + data: { + feedbackText: feedbackValue, + fake: false, + }, + } + await callApi(config as AxiosRequestConfig) + setShowFeedbackModal(false) + } catch (err: any) { + console.error(`error: ${err.message}`) + throw err + } + } return ( { - setShowFeedbackModal(false) + disabled={feedbackValue === ''} + onClick={async () => { + await handleSendFeedback() }} > diff --git
a/opentrons-ai-client/src/resources/constants.ts b/opentrons-ai-client/src/resources/constants.ts index c5e2f8826c6..29377c210f8 100644 --- a/opentrons-ai-client/src/resources/constants.ts +++ b/opentrons-ai-client/src/resources/constants.ts @@ -1,7 +1,9 @@ // ToDo (kk:05/29/2024) this should be switched by env var export const STAGING_END_POINT = 'https://staging.opentrons.ai/api/chat/completion' +export const STAGING_FEEDBACK_END_POINT = 'https://staging.opentrons.ai/api/chat/feedback' export const PROD_END_POINT = 'https://opentrons.ai/api/chat/completion' +export const PROD_FEEDBACK_END_POINT = 'https://opentrons.ai/api/chat/feedback' // auth0 domain export const AUTH0_DOMAIN = 'identity.auth.opentrons.com' @@ -19,5 +21,6 @@ export const LOCAL_AUTH0_CLIENT_ID = 'PcuD1wEutfijyglNeRBi41oxsKJ1HtKw' export const LOCAL_AUTH0_AUDIENCE = 'sandbox-ai-api' export const LOCAL_AUTH0_DOMAIN = 'identity.auth-dev.opentrons.com' export const LOCAL_END_POINT = 'http://localhost:8000/api/chat/completion' +export const LOCAL_FEEDBACK_END_POINT = 'http://localhost:8000/api/chat/feedback' export const CLIENT_MAX_WIDTH = '1440px' diff --git a/opentrons-ai-server/api/handler/fast.py b/opentrons-ai-server/api/handler/fast.py index 8e5572d0c15..b846b46c117 100644 --- a/opentrons-ai-server/api/handler/fast.py +++ b/opentrons-ai-server/api/handler/fast.py @@ -23,6 +23,7 @@ from api.models.chat_request import ChatRequest from api.models.chat_response import ChatResponse from api.models.empty_request_error import EmptyRequestError +from api.models.feedback_response import FeedbackResponse from api.models.internal_server_error import InternalServerError from api.settings import Settings @@ -245,6 +246,34 @@ async def redoc_html() -> HTMLResponse: return get_redoc_html(openapi_url="/api/openapi.json", title="Opentrons API Documentation") +@app.post( + "/api/chat/feedback", response_model=Union[FeedbackResponse, ErrorResponse], summary="Feedback", description="Send feedback to the team." 
+) +async def feedback(request: Request, auth_result: Any = Security(auth.verify)) -> FeedbackResponse: # noqa: B008 + """ + Send feedback to the team. + + - **request**: The HTTP request containing the feedback message. + - **returns**: A feedback response or an error message. + """ + logger.info("POST /api/chat/feedback") + try: + body = await request.json() + if "feedbackText" not in body.keys() or body["feedbackText"] == "": + logger.info("Feedback empty") + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=EmptyRequestError(message="Request body is empty")) + logger.info(f"Feedback received: {body}") + feedbackText = body["feedbackText"] + # todo: Store feedback text in a database + return FeedbackResponse(reply=f"Feedback Received: {feedbackText}", fake=False) + + except Exception as e: + logger.exception("Error processing feedback") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=InternalServerError(exception_object=e).model_dump() + ) from e + + @app.get("/api/doc", include_in_schema=False) async def swagger_html() -> HTMLResponse: return get_swagger_ui_html(openapi_url="/api/openapi.json", title="Opentrons API Documentation") diff --git a/opentrons-ai-server/api/models/feedback_response.py b/opentrons-ai-server/api/models/feedback_response.py new file mode 100644 index 00000000000..80e335871c3 --- /dev/null +++ b/opentrons-ai-server/api/models/feedback_response.py @@ -0,0 +1,6 @@ +from pydantic import BaseModel + + +class FeedbackResponse(BaseModel): + reply: str + fake: bool diff --git a/opentrons-ai-server/tests/helpers/client.py b/opentrons-ai-server/tests/helpers/client.py index 5bbff6699c5..327cf2d452d 100644 --- a/opentrons-ai-server/tests/helpers/client.py +++ b/opentrons-ai-server/tests/helpers/client.py @@ -68,6 +68,11 @@ def get_chat_completion(self, message: str, fake: bool = True, bad_auth: bool = headers = self.standard_headers if not bad_auth else self.invalid_auth_headers return
self.httpx.post("/chat/completion", headers=headers, json=request.model_dump()) + def get_feedback(self, message: str, fake: bool = True) -> Response: + """Call the /chat/feedback endpoint and return the response.""" + request = {"feedbackText": message} + return self.httpx.post("/chat/feedback", headers=self.standard_headers, json=request) + + def get_bad_endpoint(self, bad_auth: bool = False) -> Response: """Call nonexistent endpoint and return the response.""" headers = self.standard_headers if not bad_auth else self.invalid_auth_headers diff --git a/opentrons-ai-server/tests/test_live.py b/opentrons-ai-server/tests/test_live.py index beb3c1b483c..12294082029 100644 --- a/opentrons-ai-server/tests/test_live.py +++ b/opentrons-ai-server/tests/test_live.py @@ -1,5 +1,6 @@ import pytest from api.models.chat_response import ChatResponse +from api.models.feedback_response import FeedbackResponse from tests.helpers.client import Client @@ -26,6 +27,13 @@ def test_get_chat_completion_bad_auth(client: Client) -> None: assert response.status_code == 401, "Chat completion with bad auth should return HTTP 401" +@pytest.mark.live +def test_get_feedback_good_auth(client: Client) -> None: + """Test the feedback endpoint with good authentication.""" + response = client.get_feedback("How do I load tipracks for my 8 channel pipette on an OT2?", fake=True) + assert response.status_code == 200, "Feedback with good auth should return HTTP 200" + FeedbackResponse.model_validate(response.json()) + @pytest.mark.live def test_get_bad_endpoint_with_good_auth(client: Client) -> None: """Test a nonexistent endpoint with good authentication."""