Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

remove eager mode on feedback #216

Merged
merged 2 commits into from
Nov 16, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 16 additions & 23 deletions langserve/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@
from langchain.schema.runnable import Runnable, RunnableConfig
from langchain.schema.runnable.config import get_config_list, merge_configs
from langsmith import client as ls_client
from langsmith.utils import LangSmithNotFoundError, tracing_is_enabled
from langsmith.utils import tracing_is_enabled
from typing_extensions import Annotated

try:
Expand Down Expand Up @@ -1046,6 +1046,11 @@ async def playground(
async def feedback(feedback_create_req: FeedbackCreateRequest) -> Feedback:
"""
Send feedback on an individual run to langsmith

Note that a successful response means only that the feedback was
successfully submitted. It does not guarantee that the feedback was
recorded by LangSmith. Requests may be silently rejected by the server
if they are unauthenticated or invalid.
"""

if not tracing_is_enabled() or not enable_feedback_endpoint:
Expand All @@ -1055,28 +1060,16 @@ async def feedback(feedback_create_req: FeedbackCreateRequest) -> Feedback:
+ "enabled on your LangServe server.",
)

try:
feedback_from_langsmith = langsmith_client.create_feedback(
feedback_create_req.run_id,
feedback_create_req.key,
score=feedback_create_req.score,
value=feedback_create_req.value,
comment=feedback_create_req.comment,
source_info={
"from_langserve": True,
},
# We execute eagerly, meaning we confirm the run exists in
# LangSmith before returning a response to the user. This ensures
# that clients of the UI know that the feedback was successfully
# recorded before they receive a 200 response
eager=True,
# We lower the number of attempts to 3 to ensure we have time
# to wait for a run to show up, but do not take forever in cases
# of bad input
stop_after_attempt=3,
)
except LangSmithNotFoundError:
raise HTTPException(404, "No run with the given run_id exists")
feedback_from_langsmith = langsmith_client.create_feedback(
feedback_create_req.run_id,
feedback_create_req.key,
score=feedback_create_req.score,
value=feedback_create_req.value,
comment=feedback_create_req.comment,
source_info={
"from_langserve": True,
},
)

# We purposefully select out fields from langsmith so that we don't
# fail validation if langsmith adds extra fields. We prefer this over
Expand Down
34 changes: 0 additions & 34 deletions tests/unit_tests/test_server_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@
from langchain.schema.runnable.base import RunnableLambda
from langchain.schema.runnable.utils import ConfigurableField, Input, Output
from langsmith import schemas as ls_schemas
from langsmith.utils import LangSmithNotFoundError
from pytest import MonkeyPatch
from pytest_mock import MockerFixture
from typing_extensions import TypedDict
Expand Down Expand Up @@ -1636,39 +1635,6 @@ async def test_feedback_succeeds_when_langsmith_enabled() -> None:
assert json_response == expected_response_json


@pytest.mark.asyncio
async def test_feedback_fails_when_run_doesnt_exist() -> None:
"""Tests that the feedback endpoint can't accept feedback for a non-existent run."""

with patch("langserve.server.ls_client") as mocked_ls_client_package:
with patch("langserve.server.tracing_is_enabled") as tracing_is_enabled:
tracing_is_enabled.return_value = True
mocked_client = MagicMock(return_value=None)
mocked_ls_client_package.Client.return_value = mocked_client
mocked_client.create_feedback.side_effect = LangSmithNotFoundError(
"no run :/"
)
local_app = FastAPI()
add_routes(
local_app,
RunnableLambda(lambda foo: "hello"),
enable_feedback_endpoint=True,
)

async with get_async_test_client(
local_app, raise_app_exceptions=True
) as async_client:
response = await async_client.post(
"/feedback",
json={
"run_id": "f47ac10b-58cc-4372-a567-0e02b2c3d479",
"key": "silliness",
"score": 1000,
},
)
assert response.status_code == 404


@pytest.mark.asyncio
async def test_feedback_fails_when_langsmith_disabled(app: FastAPI) -> None:
"""Tests that feedback is not sent to langsmith if langsmith is disabled."""
Expand Down