Commit e778bd0: Update default memories

hinthornw committed Oct 2, 2024
1 parent 12021e1

Showing 11 changed files with 145 additions and 115 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/integration-tests.yml
@@ -31,7 +31,7 @@ jobs:
       run: |
         curl -LsSf https://astral.sh/uv/install.sh | sh
         uv venv
-        uv pip install -r pyproject.toml
+        uv pip install -r pyproject.toml --extra dev
         uv pip install -U pytest-asyncio vcrpy
     - name: Run integration tests
       env:
13 changes: 8 additions & 5 deletions pyproject.toml
@@ -11,13 +11,12 @@ requires-python = ">=3.9"
 dependencies = [
     "langgraph>=0.2.32,<0.3.0",
     # Optional (for selecting different models)
-    "langchain-openai>=0.1.22",
-    "langchain-anthropic>=0.1.23",
-    "langchain>=0.2.14",
-    "langchain-fireworks>=0.1.7",
+    "langchain-openai>=0.2.1",
+    "langchain-anthropic>=0.2.1",
+    "langchain>=0.3.1",
     "python-dotenv>=1.0.1",
     "langgraph-sdk>=0.1.32",
-    "trustcall>=0.0.20",
+    "trustcall>=0.0.21",
 ]

 [project.optional-dependencies]
@@ -57,3 +56,7 @@ include = ["*.py", "*.pyi", "*.ipynb"]
 "ntbk/*" = ["D", "UP", "T201"]
 [tool.ruff.lint.pydocstyle]
 convention = "google"
+
+[tool.mypy]
+ignore_errors = true
39 changes: 39 additions & 0 deletions src/chatbot/configuration.py
@@ -0,0 +1,39 @@
"""Define the configurable parameters for the chat bot."""

import os
from dataclasses import dataclass, fields
from typing import Any, Optional

from langchain_core.runnables import RunnableConfig

from chatbot.prompts import SYSTEM_PROMPT


@dataclass(kw_only=True)
class ChatConfigurable:
"""The configurable fields for the chatbot."""

user_id: str = "default-user"
mem_assistant_id: str = (
"memory_graph" # update to the UUID if you configure a custom assistant
)
model: str = "anthropic/claude-3-5-sonnet-20240620"
delay_seconds: int = 10 # For debouncing memory creation
system_prompt: str = SYSTEM_PROMPT
# None will default to connecting to the local deployment
memory_service_url: str | None = None

@classmethod
def from_runnable_config(
cls, config: Optional[RunnableConfig] = None
) -> "ChatConfigurable":
"""Load configuration."""
configurable = (
config["configurable"] if config and "configurable" in config else {}
)
values: dict[str, Any] = {
f.name: os.environ.get(f.name.upper(), configurable.get(f.name))
for f in fields(cls)
if f.init
}
return cls(**{k: v for k, v in values.items() if v})
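
`from_runnable_config` resolves each field in a fixed order: an upper-cased environment variable first, then `config["configurable"]`, then the dataclass default (falsy values are filtered out, so defaults survive). A minimal sketch of that precedence, assuming the `chatbot` package is importable:

```python
import os

from chatbot.configuration import ChatConfigurable

# With no env var and no config, the dataclass defaults apply.
assert ChatConfigurable.from_runnable_config().user_id == "default-user"

# Env vars (upper-cased field names) win over config["configurable"],
# because os.environ.get(name, configurable.get(name)) consults them first.
os.environ["USER_ID"] = "env-user"
cfg = ChatConfigurable.from_runnable_config(
    {"configurable": {"user_id": "config-user", "delay_seconds": 5}}
)
assert cfg.user_id == "env-user"
assert cfg.delay_seconds == 5  # no DELAY_SECONDS env var, so the config value wins
```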
91 changes: 18 additions & 73 deletions src/chatbot/graph.py
@@ -1,84 +1,27 @@
"""Example chatbot that incorporates user memories."""

import os
import uuid
from dataclasses import dataclass, fields
from dataclasses import dataclass
from datetime import datetime, timezone
from typing import List, Optional

from langchain.chat_models import init_chat_model
from langchain_core.messages import AnyMessage
from langchain_core.runnables import RunnableConfig
from langgraph.graph import StateGraph, add_messages
from langgraph.graph import StateGraph
from langgraph.graph.message import Messages, add_messages
from langgraph_sdk import get_client
from typing_extensions import Annotated

from chatbot.prompts import SYSTEM_PROMPT
from chatbot.configuration import ChatConfigurable
from chatbot.utils import format_memories, init_model


@dataclass
class ChatState:
"""The state of the chatbot."""

messages: Annotated[List[AnyMessage], add_messages]
messages: Annotated[list[Messages], add_messages]


@dataclass(kw_only=True)
class ChatConfigurable:
"""The configurable fields for the chatbot."""

user_id: str = "default-user"
mem_assistant_id: str = (
"memory_graph" # update to the UUID if you configure a custom assistant
)
model: str = "anthropic/claude-3-5-sonnet-20240620"
delay_seconds: int = 10 # For debouncing memory creation
system_prompt: str = SYSTEM_PROMPT
# None will default to connecting to the local deployment
memory_service_url: str | None = None

@classmethod
def from_runnable_config(cls, config: Optional[RunnableConfig] = None):
"""Load configuration."""
configurable = (
config["configurable"] if config and "configurable" in config else {}
)
values = {
f.name: os.environ.get(f.name.upper(), configurable.get(f.name))
for f in fields(cls)
if f.init
}
return cls(**{k: v for k, v in values.items() if v})


def format_memories(memories: Optional[list[dict]]) -> str:
"""Format the user's memories."""
if not memories:
return ""
# Note Bene: You can format better than this....
memories = "\n".join(str(m) for m in memories)
return f"""
## Memories
You have noted the following memorable events from previous interactions with the user.
<memories>
{memories}
</memories>
"""


def init_model(fully_specified_name: str):
"""Initialize the configured chat model."""
if "/" in fully_specified_name:
provider, model = fully_specified_name.split("/", maxsplit=1)
else:
provider = None
model = fully_specified_name
return init_chat_model(model, model_provider=provider)


async def bot(state: ChatState, config: RunnableConfig) -> ChatState:
async def bot(state: ChatState, config: RunnableConfig) -> dict[str, list[Messages]]:
"""Prompt the bot to resopnd to the user, incorporating memories (if provided)."""
configurable = ChatConfigurable.from_runnable_config(config)
memory_client = get_client(url=configurable.memory_service_url)
@@ -89,27 +32,29 @@ async def bot(state: ChatState, config: RunnableConfig) -> ChatState:

     model = init_model(configurable.model)
     prompt = configurable.system_prompt.format(
-        user_info=format_memories([item["value"] for item in user_memory["items"]]),
+        user_info=format_memories([item for item in user_memory["items"]]),
         time=datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S"),
     )
     m = await model.ainvoke(
-        [{"role": "system", "content": prompt}] + state.messages,
+        [{"role": "system", "content": prompt}, *state.messages],
     )

     return {"messages": [m]}


-async def schedule_memories(state: ChatState, config: RunnableConfig) -> ChatState:
-    """Prompt the bot to resopnd to the user, incorporating memories (if provided)."""
+async def schedule_memories(state: ChatState, config: RunnableConfig) -> None:
+    """Prompt the bot to respond to the user, incorporating memories (if provided)."""
     configurable = ChatConfigurable.from_runnable_config(config)
     memory_client = get_client(url=configurable.memory_service_url)
-    memory_thread = uuid.uuid5(
-        uuid.NAMESPACE_DNS,
-        configurable.user_id + config["configurable"]["thread_id"],
+    memory_thread = str(
+        uuid.uuid5(
+            uuid.NAMESPACE_DNS,
+            configurable.user_id + config["configurable"]["thread_id"],
+        )
     )
     await memory_client.threads.create(thread_id=memory_thread, if_exists="do_nothing")
     await memory_client.runs.create(
-        memory_thread,
+        thread_id=memory_thread,
         assistant_id=configurable.mem_assistant_id,
         input={
             # the service dedupes messages by ID, so we can send the full convo each time
@@ -122,7 +67,7 @@ async def schedule_memories(state: ChatState, config: RunnableConfig) -> ChatState:
             },
         },
         multitask_strategy="rollback",
-        # This let's us "debounce" repeated requests to the memory graph
+        # This lets us "debounce" repeated requests to the memory graph
         # if the user is actively engaging in a conversation
         after_seconds=configurable.delay_seconds,
     )
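
Two details above make memory scheduling idempotent and debounced: the memory thread ID is a deterministic `uuid5` of `user_id` plus the chat `thread_id`, so every turn of a conversation targets the same thread, and `multitask_strategy="rollback"` combined with `after_seconds` means a request arriving inside the delay window supersedes the pending run rather than stacking a new one. A standard-library sketch of the deterministic ID:

```python
import uuid


def memory_thread_id(user_id: str, thread_id: str) -> str:
    """Derive a stable memory-thread ID from the user and chat thread."""
    return str(uuid.uuid5(uuid.NAMESPACE_DNS, user_id + thread_id))


# The same inputs always yield the same UUID, across processes and restarts,
# so repeated schedules land on one thread instead of creating new ones.
assert memory_thread_id("default-user", "t-1") == memory_thread_id("default-user", "t-1")
assert memory_thread_id("default-user", "t-1") != memory_thread_id("default-user", "t-2")
```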
35 changes: 35 additions & 0 deletions src/chatbot/utils.py
@@ -0,0 +1,35 @@
"""Define utility functions for your graph."""

from typing import Any, Mapping, Optional

from langchain.chat_models import init_chat_model
from langchain_core.language_models import BaseChatModel


def format_memories(memories: Optional[list[Mapping[str, Any]]]) -> str:
"""Format the user's memories."""
if not memories:
return ""
# Note Bene: You can format better than this....
formatted_memories = "\n".join(
f'{str(m["value"])}\tLast updated: {m["updated_at"]}' for m in memories
)
return f"""
## Memories
You have noted the following memorable events from previous interactions with the user.
<memories>
{formatted_memories}
</memories>
"""


def init_model(fully_specified_name: str) -> BaseChatModel:
"""Initialize the configured chat model."""
if "/" in fully_specified_name:
provider, model = fully_specified_name.split("/", maxsplit=1)
else:
provider = None
model = fully_specified_name
return init_chat_model(model, model_provider=provider)
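
`init_model` accepts a fully specified `provider/model` string and falls back to `init_chat_model`'s own provider inference when no slash is present. A usage sketch, assuming the relevant API keys are set in the environment:

```python
from chatbot.utils import init_model

# Explicit provider prefix: split once on "/" into (provider, model).
claude = init_model("anthropic/claude-3-5-sonnet-20240620")

# No slash: provider stays None and init_chat_model infers it from the name.
gpt = init_model("gpt-4o")
```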
39 changes: 30 additions & 9 deletions src/memory_graph/configuration.py
@@ -1,25 +1,27 @@
"""Define the configurable parameters for the agent."""
"""Define the configurable parameters for the memory service."""

import os
from dataclasses import dataclass, field, fields
from typing import Literal, Optional
from typing import Any, Literal, Optional

from langchain_core.runnables import RunnableConfig
from typing_extensions import Annotated

from memory_graph.prompts import SYSTEM_PROMPT


@dataclass(kw_only=True)
class MemoryConfig:
"""Configuration for memory-related operations."""

name: str
"""This tells the model how to reference the function
and organizes related memories within the namespace."""
description: str
"""Description for what this memory type is intended to capture."""

parameters: dict
parameters: dict[str, Any]
"""The JSON Schema of the memory document to manage."""
system_prompt: Optional[str] = SYSTEM_PROMPT
system_prompt: str = ""
"""The system prompt to use for the memory assistant."""
update_mode: Literal["patch", "insert"] = field(default="patch")
"""Whether to continuously patch the memory, or treat each new
Expand Down Expand Up @@ -54,12 +56,14 @@ class Configuration:
"""The memory_types for the memory assistant."""

@classmethod
def from_runnable_config(cls, config: Optional[RunnableConfig] = None):
def from_runnable_config(
cls, config: Optional[RunnableConfig] = None
) -> "Configuration":
"""Create a Configuration instance from a RunnableConfig."""
configurable = (
config["configurable"] if config and "configurable" in config else {}
)
values = {
values: dict[str, Any] = {
f.name: os.environ.get(f.name.upper(), configurable.get(f.name))
for f in fields(cls)
if f.init
Expand Down Expand Up @@ -91,6 +95,19 @@ def from_runnable_config(cls, config: Optional[RunnableConfig] = None):
"items": {"type": "string"},
"description": "A list of the user's interests",
},
"home": {
"type": "string",
"description": "Description of the user's home town/neighborhood, etc.",
},
"occupation": {
"type": "string",
"description": "The user's current occupation or profession",
},
"conversation_preferences": {
"type": "array",
"items": {"type": "string"},
"description": "A list of the user's preferred conversation styles, pronouns, topics they want to avoid, etc.",
},
},
},
),
@@ -103,7 +120,11 @@ def from_runnable_config(cls, config: Optional[RunnableConfig] = None):
             "properties": {
                 "context": {
                     "type": "string",
-                    "description": "The situation or circumstance in which the memory occurred that inform when it would be useful to recall this.",
+                    "description": "The situation or circumstance where this memory may be relevant. "
+                    "Include any caveats or conditions that contextualize the memory. "
+                    "For example, if a user shares a preference, note if it only applies "
+                    "in certain situations (e.g., 'only at work'). Add any other relevant "
+                    "'meta' details that help fully understand when and how to use this memory.",
                 },
                 "content": {
                     "type": "string",
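
Each `MemoryConfig` pairs a JSON Schema with an `update_mode`: "patch" keeps a single document per user that the extractor continually updates, while "insert" appends a new document per extraction. A hypothetical custom memory type, with a name, description, and schema invented for illustration:

```python
from memory_graph.configuration import MemoryConfig

# Illustrative "insert"-mode memory: each extraction appends a new document
# instead of patching one running profile.
reading_notes = MemoryConfig(
    name="ReadingNote",
    description="Books, articles, or papers the user mentions reading.",
    parameters={
        "type": "object",
        "properties": {
            "title": {"type": "string", "description": "What the user read"},
            "reaction": {"type": "string", "description": "How they felt about it"},
        },
        "required": ["title"],
    },
    update_mode="insert",
)
```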
2 changes: 1 addition & 1 deletion src/memory_graph/graph.py
@@ -8,9 +8,9 @@
 from dataclasses import asdict

 from langchain_core.runnables import RunnableConfig
-from langgraph.constants import Send
 from langgraph.graph import StateGraph
 from langgraph.store.base import BaseStore
+from langgraph.types import Send
 from trustcall import create_extractor

 from memory_graph import configuration, utils
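
The only change here is the import path: `Send` now comes from `langgraph.types` rather than `langgraph.constants`. In this graph, `Send` is what lets a routing function fan a single state out into one node invocation per memory type. A generic sketch of the pattern, with illustrative node and field names:

```python
from langgraph.types import Send


def fan_out(state: dict) -> list[Send]:
    # One handler invocation per configured memory type, each with its own payload.
    return [
        Send("handle_memory_type", {"memory_type": name, "messages": state["messages"]})
        for name in state["memory_types"]
    ]
```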
17 changes: 0 additions & 17 deletions src/memory_graph/prompts.py

This file was deleted.

8 changes: 4 additions & 4 deletions src/memory_graph/utils.py
@@ -4,12 +4,12 @@

 from langchain.chat_models import init_chat_model
 from langchain_core.language_models import BaseChatModel
-from langchain_core.messages import AnyMessage, merge_message_runs
+from langchain_core.messages import BaseMessage, merge_message_runs


 def prepare_messages(
-    messages: Sequence[AnyMessage], system_prompt: str
-) -> list[AnyMessage]:
+    messages: Sequence[BaseMessage], system_prompt: str
+) -> list[BaseMessage]:
     """Merge message runs and add instructions before and after to stay on task."""
     sys = {
         "role": "system",
@@ -25,7 +25,7 @@ def prepare_messages(
"<memory-system>Reflect on the interaction above."
" What memories ought to be retained or updated?</memory-system>",
}
return merge_message_runs([sys] + list(messages) + [m])
return list(merge_message_runs(messages=[sys] + list(messages) + [m]))


def init_model(fully_specified_name: str) -> BaseChatModel:
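
`prepare_messages` brackets the conversation between the system prompt and a closing `<memory-system>` instruction, then merges consecutive same-role messages into single runs. A usage sketch with illustrative message contents:

```python
from langchain_core.messages import HumanMessage

from memory_graph.utils import prepare_messages

merged = prepare_messages(
    [HumanMessage("I moved to Berlin"), HumanMessage("last month, actually.")],
    system_prompt="You are extracting long-term memories from this chat.",
)
# merge_message_runs collapses the two consecutive human messages into one run;
# the first element carries the system prompt and the last carries the
# "<memory-system>Reflect on the interaction above..." instruction.
```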
Binary file modified static/memory_graph.png