diff --git a/src/chatbot/graph.py b/src/chatbot/graph.py index b234061..165669f 100644 --- a/src/chatbot/graph.py +++ b/src/chatbot/graph.py @@ -55,8 +55,9 @@ async def schedule_memories(state: ChatState, config: RunnableConfig) -> None: # that run will be canceled, and a **new** one will be scheduled once # this node is executed again. thread_id=config["configurable"]["thread_id"], - # Rollback & cancel any scheduled runs for the target thread - # that haven't completed + # This memory-formation run will be enqueued and run later. + # If a new run comes in before it is scheduled, it will be canceled; + # then, when this node is executed again, a *new* run will be scheduled. multitask_strategy="enqueue", # This lets us "debounce" repeated requests to the memory graph # if the user is actively engaging in a conversation. This saves us $$ and diff --git a/src/memory_graph/graph.py b/src/memory_graph/graph.py index 9ead059..793344f 100644 --- a/src/memory_graph/graph.py +++ b/src/memory_graph/graph.py @@ -31,7 +31,7 @@ async def handle_patch_memory( # Fetch existing memories from the store for this (patch) memory schema existing_item = await store.aget(namespace, "memory") - existing = {state.function_name: existing_item.value} if existing_item else None + existing = {state.function_name: existing_item.value} if existing_item else None # Get the configuration for this memory schema (identified by function_name) memory_config = next(