
Commit 9250bbf

Merge branch 'main' into bagatur/rfc_set_test_vals

baskaryan authored Jan 10, 2025
2 parents 400e38b + 843d55d

Showing 6 changed files with 77 additions and 29 deletions.
js/src/run_trees.ts (2 additions & 1 deletion)
@@ -9,6 +9,7 @@ import {
 import {
   RuntimeEnvironment,
   getEnvironmentVariable,
+  getLangSmithEnvironmentVariable,
   getRuntimeEnvironment,
 } from "./utils/env.js";
 import { Client } from "./client.js";
@@ -231,7 +232,7 @@ export class RunTree implements BaseRun {
       id: uuid.v4(),
       run_type: "chain",
       project_name:
-        getEnvironmentVariable("LANGCHAIN_PROJECT") ??
+        getLangSmithEnvironmentVariable("PROJECT") ??
         getEnvironmentVariable("LANGCHAIN_SESSION") ?? // TODO: Deprecate
         "default",
       child_runs: [],
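
Note: getLangSmithEnvironmentVariable("PROJECT") replaces the direct LANGCHAIN_PROJECT lookup, lengthening the project-name fallback chain. A minimal Python sketch of the assumed resolution order (the helper name resolve_project_name is hypothetical, and the LANGSMITH_-before-LANGCHAIN_ precedence is inferred from the helper's name, not shown in this diff):

    import os

    def resolve_project_name() -> str:
        # Hypothetical mirror of the TypeScript fallback chain above.
        # Assumes getLangSmithEnvironmentVariable("PROJECT") checks
        # LANGSMITH_PROJECT first, then falls back to LANGCHAIN_PROJECT.
        return (
            os.environ.get("LANGSMITH_PROJECT")
            or os.environ.get("LANGCHAIN_PROJECT")
            or os.environ.get("LANGCHAIN_SESSION")  # TODO: Deprecate
            or "default"
        )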
python/langsmith/_internal/_serde.py (1 addition & 1 deletion)
@@ -146,7 +146,7 @@ def dumps_json(obj: Any) -> bytes:
         logger.debug(f"Orjson serialization failed: {repr(e)}. Falling back to json.")
         result = json.dumps(
             obj,
-            default=_simple_default,
+            default=_serialize_json,
             ensure_ascii=True,
         ).encode("utf-8")
         try:
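
Note: the stdlib fallback now routes through _serialize_json, the module's shared default handler, instead of _simple_default. A simplified sketch of the overall orjson-then-json fallback pattern (the _serialize_json body below is a stand-in; the real handler lives in _serde.py):

    import json
    import logging

    import orjson

    logger = logging.getLogger(__name__)

    def _serialize_json(obj):
        # Stand-in for the module's real default handler: degrade
        # non-JSON-serializable objects to their string form.
        return str(obj)

    def dumps_json_sketch(obj) -> bytes:
        try:
            return orjson.dumps(obj)  # fast path
        except TypeError as e:
            logger.debug(f"Orjson serialization failed: {repr(e)}. Falling back to json.")
            return json.dumps(obj, default=_serialize_json, ensure_ascii=True).encode("utf-8")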
python/langsmith/schemas.py (4 additions & 0 deletions)
@@ -523,6 +523,10 @@ class FeedbackSourceBase(BaseModel):
     """The type of the feedback source."""
     metadata: Optional[Dict[str, Any]] = Field(default_factory=dict)
     """Additional metadata for the feedback source."""
+    user_id: Optional[Union[UUID, str]] = None
+    """The user ID associated with the feedback source."""
+    user_name: Optional[str] = None
+    """The user name associated with the feedback source."""
 
 
 class APIFeedbackSource(FeedbackSourceBase):
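
Note: feedback sources can now carry the identity of the end user who left the feedback. A hedged usage sketch (the field names come from the diff above; the type value and metadata contents are illustrative, and whether downstream consumers display user_name is not shown here):

    import uuid

    from langsmith.schemas import FeedbackSourceBase

    source = FeedbackSourceBase(
        type="api",  # illustrative value
        metadata={"surface": "thumbs-widget"},
        user_id=uuid.uuid4(),      # Optional[Union[UUID, str]]
        user_name="Ada Lovelace",  # Optional[str]
    )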
python/langsmith/wrappers/_openai.py (32 additions & 21 deletions)
@@ -3,14 +3,14 @@
 import functools
 import logging
 from collections import defaultdict
+from collections.abc import Mapping
 from typing import (
     TYPE_CHECKING,
     Any,
     Callable,
     DefaultDict,
     Dict,
     List,
-    Mapping,
     Optional,
     Type,
     TypeVar,
@@ -82,23 +82,28 @@ def _reduce_choices(choices: List[Choice]) -> dict:
         "content": "",
     }
     for c in reversed_choices:
-        if c.delta.role:
+        if hasattr(c, "delta") and getattr(c.delta, "role", None):
             message["role"] = c.delta.role
             break
     tool_calls: DefaultDict[int, List[ChoiceDeltaToolCall]] = defaultdict(list)
     for c in choices:
-        if c.delta.content:
-            message["content"] += c.delta.content
-        if c.delta.function_call:
-            if not message.get("function_call"):
-                message["function_call"] = {"name": "", "arguments": ""}
-            if c.delta.function_call.name:
-                message["function_call"]["name"] += c.delta.function_call.name
-            if c.delta.function_call.arguments:
-                message["function_call"]["arguments"] += c.delta.function_call.arguments
-        if c.delta.tool_calls:
-            for tool_call in c.delta.tool_calls:
-                tool_calls[c.index].append(tool_call)
+        if hasattr(c, "delta") and getattr(c.delta, "content", None):
+            if getattr(c.delta, "content", None):
+                message["content"] += c.delta.content
+            if getattr(c.delta, "function_call", None):
+                if not message.get("function_call"):
+                    message["function_call"] = {"name": "", "arguments": ""}
+                name_ = getattr(c.delta.function_call, "name", None)
+                if name_:
+                    message["function_call"]["name"] += name_
+                arguments_ = getattr(c.delta.function_call, "arguments", None)
+                if arguments_:
+                    message["function_call"]["arguments"] += arguments_
+            if getattr(c.delta, "tool_calls", None):
+                tool_calls_list = c.delta.tool_calls
+                if tool_calls_list is not None:
+                    for tool_call in tool_calls_list:
+                        tool_calls[c.index].append(tool_call)
     if tool_calls:
         message["tool_calls"] = [None for _ in tool_calls.keys()]
     for index, tool_call_chunks in tool_calls.items():
@@ -108,22 +113,28 @@ def _reduce_choices(choices: List[Choice]) -> dict:
             "type": next((c.type for c in tool_call_chunks if c.type), None),
         }
         for chunk in tool_call_chunks:
-            if chunk.function:
+            if getattr(chunk, "function", None):
                 if not message["tool_calls"][index].get("function"):
                     message["tool_calls"][index]["function"] = {
                         "name": "",
                         "arguments": "",
                     }
-                if chunk.function.name:
+                name_ = getattr(chunk.function, "name", None)
+                if name_:
                     fn_ = message["tool_calls"][index]["function"]
-                    fn_["name"] += chunk.function.name
-                if chunk.function.arguments:
+                    fn_["name"] += name_
+                arguments_ = getattr(chunk.function, "arguments", None)
+                if arguments_:
                     fn_ = message["tool_calls"][index]["function"]
-                    fn_["arguments"] += chunk.function.arguments
+                    fn_["arguments"] += arguments_
     return {
-        "index": choices[0].index,
+        "index": getattr(choices[0], "index", 0) if choices else 0,
         "finish_reason": next(
-            (c.finish_reason for c in reversed_choices if c.finish_reason),
+            (
+                c.finish_reason
+                for c in reversed_choices
+                if getattr(c, "finish_reason", None)
+            ),
             None,
         ),
         "message": message,
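
Note: the net effect is that _reduce_choices now tolerates streamed chunks that omit delta, function_call, tool_calls, or finish_reason, skipping them instead of raising AttributeError. A small illustrative sketch of the defensive-access pattern (SimpleNamespace objects stand in for real OpenAI Choice chunks):

    from types import SimpleNamespace

    # Illustrative stand-ins for streamed chat-completion chunks; a real
    # stream yields openai Choice objects, some of which may omit fields.
    full = SimpleNamespace(
        index=0,
        finish_reason=None,
        delta=SimpleNamespace(role="assistant", content="Hello"),
    )
    sparse = SimpleNamespace(index=0, finish_reason="stop")  # no .delta at all

    content = ""
    for c in (full, sparse):
        # Defensive access: a missing .delta or .content is skipped
        # rather than crashing the trace reducer mid-stream.
        if hasattr(c, "delta") and getattr(c.delta, "content", None):
            content += c.delta.content
    print(content)  # -> "Hello"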
python/tests/integration_tests/test_client.py (32 additions & 0 deletions)
@@ -19,6 +19,7 @@
 from pydantic import BaseModel
 from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
 
+from langsmith._internal._serde import dumps_json
 from langsmith.client import ID_TYPE, Client
 from langsmith.evaluation import aevaluate, evaluate
 from langsmith.schemas import (
@@ -1155,6 +1156,37 @@ def test_surrogates():
     )
 
 
+def test_fallback_json_serialization():
+    class Document(BaseModel):
+        content: str
+
+    raw_surrogates = [
+        ("Hello\ud83d\ude00", "Hello😀"),
+        ("Python\ud83d\udc0d", "Python🐍"),
+        ("Surrogate\ud834\udd1e", "Surrogate𝄞"),
+        ("Example\ud83c\udf89", "Example🎉"),
+        ("String\ud83c\udfa7", "String🎧"),
+        ("With\ud83c\udf08", "With🌈"),
+        ("Surrogates\ud83d\ude0e", "Surrogates😎"),
+        ("Embedded\ud83d\udcbb", "Embedded💻"),
+        ("In\ud83c\udf0e", "In🌎"),
+        ("The\ud83d\udcd6", "The📖"),
+        ("Text\ud83d\udcac", "Text💬"),
+        ("收花🙄·到", "收花🙄·到"),
+    ]
+    pydantic_surrogates = [
+        (Document(content=item), expected) for item, expected in raw_surrogates
+    ]
+
+    for item, expected in raw_surrogates:
+        output = dumps_json(item).decode("utf8")
+        assert f'"{expected}"' == output
+
+    for item, expected in pydantic_surrogates:
+        output = dumps_json(item).decode("utf8")
+        assert f'{{"content":"{expected}"}}' == output
+
+
 def test_runs_stats():
     langchain_client = Client()
     # We always have stuff in the "default" project...
python/tests/integration_tests/wrappers/test_openai.py (6 additions & 6 deletions)
@@ -381,13 +381,13 @@ def test_parse_sync_api():
     original_client = openai.Client()
     patched_client = wrap_openai(openai.Client(), tracing_extra={"client": ls_client})
 
-    messages = [{"role": "user", "content": "Say 'Foo' then stop."}]
+    messages = [{"role": "user", "content": "Say 'foo' then stop."}]
 
     original = original_client.beta.chat.completions.parse(
-        messages=messages, model="gpt-3.5-turbo"
+        messages=messages, model="gpt-3.5-turbo", temperature=0, seed=42, max_tokens=3
     )
     patched = patched_client.beta.chat.completions.parse(
-        messages=messages, model="gpt-3.5-turbo"
+        messages=messages, model="gpt-3.5-turbo", temperature=0, seed=42, max_tokens=3
     )
 
     assert type(original) is type(patched)
@@ -413,13 +413,13 @@ async def test_parse_async_api():
         openai.AsyncClient(), tracing_extra={"client": ls_client}
     )
 
-    messages = [{"role": "user", "content": "Say 'Foo' then stop."}]
+    messages = [{"role": "user", "content": "Say 'foo' then stop."}]
 
     original = await original_client.beta.chat.completions.parse(
-        messages=messages, model="gpt-3.5-turbo"
+        messages=messages, model="gpt-3.5-turbo", temperature=0, seed=42, max_tokens=3
     )
     patched = await patched_client.beta.chat.completions.parse(
-        messages=messages, model="gpt-3.5-turbo"
+        messages=messages, model="gpt-3.5-turbo", temperature=0, seed=42, max_tokens=3
     )
 
     assert type(original) is type(patched)
