From 73d3a668c99b83f19a2cdf2580e0a4c2654bbef0 Mon Sep 17 00:00:00 2001 From: Grayson Adkins Date: Tue, 2 May 2023 23:44:48 -0500 Subject: [PATCH 1/3] Update query_data.py Use ConversationalRetrievalChain instead of ChatVectorDBChain; Update AsyncCallbackManager after refactor --- query_data.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/query_data.py b/query_data.py index c0028317f..99a48bee9 100644 --- a/query_data.py +++ b/query_data.py @@ -1,7 +1,7 @@ -"""Create a ChatVectorDBChain for question/answering.""" -from langchain.callbacks.base import AsyncCallbackManager +"""Create a ConversationalRetrievalChain for question/answering.""" +from langchain.callbacks.manager import AsyncCallbackManager from langchain.callbacks.tracers import LangChainTracer -from langchain.chains import ChatVectorDBChain +from langchain.chains import ConversationalRetrievalChain from langchain.chains.chat_vector_db.prompts import (CONDENSE_QUESTION_PROMPT, QA_PROMPT) from langchain.chains.llm import LLMChain @@ -12,9 +12,9 @@ def get_chain( vectorstore: VectorStore, question_handler, stream_handler, tracing: bool = False -) -> ChatVectorDBChain: - """Create a ChatVectorDBChain for question/answering.""" - # Construct a ChatVectorDBChain with a streaming llm for combine docs +) -> ConversationalRetrievalChain: + """Create a ConversationalRetrievalChain for question/answering.""" + # Construct a ConversationalRetrievalChain with a streaming llm for combine docs # and a separate, non-streaming llm for question generation manager = AsyncCallbackManager([]) question_manager = AsyncCallbackManager([question_handler]) @@ -45,10 +45,11 @@ def get_chain( streaming_llm, chain_type="stuff", prompt=QA_PROMPT, callback_manager=manager ) - qa = ChatVectorDBChain( - vectorstore=vectorstore, + qa = ConversationalRetrievalChain( + retriever=vectorstore.as_retriever(), combine_docs_chain=doc_chain, question_generator=question_generator, callback_manager=manager, 
+ return_source_documents=True ) return qa From 80756027bca22c3bbd6ef12f25943b6c3cf3b676 Mon Sep 17 00:00:00 2001 From: Grayson Adkins Date: Wed, 3 May 2023 00:39:43 -0500 Subject: [PATCH 2/3] Update README.md Link to ConversationalRetrievalChain example docs --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index fd2de7e20..3ed2a4b7f 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ Ingestion has the following steps: 3. Split documents with LangChain's [TextSplitter](https://langchain.readthedocs.io/en/latest/reference/modules/text_splitter.html) 4. Create a vectorstore of embeddings, using LangChain's [vectorstore wrapper](https://python.langchain.com/en/latest/modules/indexes/vectorstores.html) (with OpenAI's embeddings and FAISS vectorstore). -Question-Answering has the following steps, all handled by [ChatVectorDBChain](https://langchain.readthedocs.io/en/latest/modules/indexes/chain_examples/chat_vector_db.html): +Question-Answering has the following steps, all handled by [ConversationalRetrievalChain](https://python.langchain.com/en/latest/use_cases/code.html): 1. Given the chat history and new user input, determine what a standalone question would be (using GPT-3). 2. Given that standalone question, look up relevant documents from the vectorstore. From 528a2279e68082be164060fb47d3cf7553f6c7e0 Mon Sep 17 00:00:00 2001 From: Grayson Adkins Date: Wed, 3 May 2023 00:44:07 -0500 Subject: [PATCH 3/3] Update README.md Better link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3ed2a4b7f..bbac6fbe1 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ Ingestion has the following steps: 3. Split documents with LangChain's [TextSplitter](https://langchain.readthedocs.io/en/latest/reference/modules/text_splitter.html) 4. 
Create a vectorstore of embeddings, using LangChain's [vectorstore wrapper](https://python.langchain.com/en/latest/modules/indexes/vectorstores.html) (with OpenAI's embeddings and FAISS vectorstore). -Question-Answering has the following steps, all handled by [ConversationalRetrievalChain](https://python.langchain.com/en/latest/use_cases/code.html): +Question-Answering has the following steps, all handled by [ConversationalRetrievalChain](https://python.langchain.com/en/latest/reference/modules/chains.html?highlight=ConversationalRetrievalChain#langchain.chains.ConversationalRetrievalChain): 1. Given the chat history and new user input, determine what a standalone question would be (using GPT-3). 2. Given that standalone question, look up relevant documents from the vectorstore.