diff --git a/src/chatbot/configuration.py b/src/chatbot/configuration.py
index 11b0144..091e525 100644
--- a/src/chatbot/configuration.py
+++ b/src/chatbot/configuration.py
@@ -20,8 +20,6 @@ class ChatConfigurable:
     model: str = "anthropic/claude-3-5-sonnet-20240620"
     delay_seconds: int = 10  # For debouncing memory creation
     system_prompt: str = SYSTEM_PROMPT
-    # None will default to connecting to the local deployment
-    memory_service_url: str | None = None
 
     @classmethod
     def from_runnable_config(
diff --git a/src/chatbot/graph.py b/src/chatbot/graph.py
index d518f77..d7a7d01 100644
--- a/src/chatbot/graph.py
+++ b/src/chatbot/graph.py
@@ -1,12 +1,12 @@
 """Example chatbot that incorporates user memories."""
 
-import uuid
 from dataclasses import dataclass
 from datetime import datetime, timezone
 
 from langchain_core.runnables import RunnableConfig
 from langgraph.graph import StateGraph
 from langgraph.graph.message import Messages, add_messages
+from langgraph.store.base import BaseStore
 from langgraph_sdk import get_client
 from typing_extensions import Annotated
 
@@ -21,14 +21,15 @@ class ChatState:
     messages: Annotated[list[Messages], add_messages]
 
 
-async def bot(state: ChatState, config: RunnableConfig) -> dict[str, list[Messages]]:
+async def bot(
+    state: ChatState, config: RunnableConfig, store: BaseStore
+) -> dict[str, list[Messages]]:
     """Prompt the bot to resopnd to the user, incorporating memories (if provided)."""
     configurable = ChatConfigurable.from_runnable_config(config)
-    memory_client = get_client(url=configurable.memory_service_url)
     namespace = (configurable.user_id,)
     # This lists ALL user memories in the provided namespace (up to the `limit`)
     # you can also filter by content.
-    user_memory = await memory_client.store.search_items(namespace)
+    user_memory = await store.search(namespace)
 
     model = init_model(configurable.model)
     prompt = configurable.system_prompt.format(
@@ -45,16 +46,9 @@
 async def schedule_memories(state: ChatState, config: RunnableConfig) -> None:
     """Prompt the bot to respond to the user, incorporating memories (if provided)."""
     configurable = ChatConfigurable.from_runnable_config(config)
-    memory_client = get_client(url=configurable.memory_service_url)
-    memory_thread = str(
-        uuid.uuid5(
-            uuid.NAMESPACE_DNS,
-            configurable.user_id + config["configurable"]["thread_id"],
-        )
-    )
-    await memory_client.threads.create(thread_id=memory_thread, if_exists="do_nothing")
+    memory_client = get_client()
     await memory_client.runs.create(
-        thread_id=memory_thread,
+        thread_id=config["configurable"]["thread_id"],
         assistant_id=configurable.mem_assistant_id,
         input={
             # the service dedupes messages by ID, so we can send the full convo each time
@@ -66,7 +60,7 @@ async def schedule_memories(state: ChatState, config: RunnableConfig) -> None:
                 "user_id": configurable.user_id,
             },
         },
-        multitask_strategy="rollback",
+        multitask_strategy="enqueue",
        # This lets us "debounce" repeated requests to the memory graph
        # if the user is actively engaging in a conversation
         after_seconds=configurable.delay_seconds,