diff --git a/README.md b/README.md
index fd2de7e20..bbac6fbe1 100644
--- a/README.md
+++ b/README.md
@@ -34,7 +34,7 @@ Ingestion has the following steps:
 3. Split documents with LangChain's [TextSplitter](https://langchain.readthedocs.io/en/latest/reference/modules/text_splitter.html)
 4. Create a vectorstore of embeddings, using LangChain's [vectorstore wrapper](https://python.langchain.com/en/latest/modules/indexes/vectorstores.html) (with OpenAI's embeddings and FAISS vectorstore).
 
-Question-Answering has the following steps, all handled by [ChatVectorDBChain](https://langchain.readthedocs.io/en/latest/modules/indexes/chain_examples/chat_vector_db.html):
+Question-Answering has the following steps, all handled by [ConversationalRetrievalChain](https://python.langchain.com/en/latest/reference/modules/chains.html?highlight=ConversationalRetrievalChain#langchain.chains.ConversationalRetrievalChain):
 
 1. Given the chat history and new user input, determine what a standalone question would be (using GPT-3).
 2. Given that standalone question, look up relevant documents from the vectorstore.
diff --git a/query_data.py b/query_data.py
index c0028317f..99a48bee9 100644
--- a/query_data.py
+++ b/query_data.py
@@ -1,7 +1,7 @@
-"""Create a ChatVectorDBChain for question/answering."""
-from langchain.callbacks.base import AsyncCallbackManager
+"""Create a ConversationalRetrievalChain for question/answering."""
+from langchain.callbacks.manager import AsyncCallbackManager
 from langchain.callbacks.tracers import LangChainTracer
-from langchain.chains import ChatVectorDBChain
+from langchain.chains import ConversationalRetrievalChain
 from langchain.chains.chat_vector_db.prompts import (CONDENSE_QUESTION_PROMPT,
                                                      QA_PROMPT)
 from langchain.chains.llm import LLMChain
@@ -12,9 +12,9 @@
 
 def get_chain(
     vectorstore: VectorStore, question_handler, stream_handler, tracing: bool = False
-) -> ChatVectorDBChain:
-    """Create a ChatVectorDBChain for question/answering."""
-    # Construct a ChatVectorDBChain with a streaming llm for combine docs
+) -> ConversationalRetrievalChain:
+    """Create a ConversationalRetrievalChain for question/answering."""
+    # Construct a ConversationalRetrievalChain with a streaming llm for combine docs
     # and a separate, non-streaming llm for question generation
     manager = AsyncCallbackManager([])
     question_manager = AsyncCallbackManager([question_handler])
@@ -45,10 +45,11 @@ def get_chain(
         streaming_llm, chain_type="stuff", prompt=QA_PROMPT, callback_manager=manager
     )
 
-    qa = ChatVectorDBChain(
-        vectorstore=vectorstore,
+    qa = ConversationalRetrievalChain(
+        retriever=vectorstore.as_retriever(),
         combine_docs_chain=doc_chain,
         question_generator=question_generator,
         callback_manager=manager,
+        return_source_documents=True
     )
     return qa
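
For context on how the migrated chain is consumed, here is a minimal, hypothetical driver script, not part of this PR. The pickle path, the `TokenPrinter` handler, and the sample question are assumptions for illustration; the real app wires in websocket-aware handlers and serves this over FastAPI.

```python
"""Hypothetical driver for the updated get_chain (not part of this PR).

Assumes the ingestion step pickled a FAISS vectorstore to
"vectorstore.pkl" and that OPENAI_API_KEY is set; the path, the
handler class below, and the sample question are illustrative only.
"""
import asyncio
import pickle

from langchain.callbacks.base import AsyncCallbackHandler

from query_data import get_chain


class TokenPrinter(AsyncCallbackHandler):
    """Stand-in for the app's websocket handlers: print tokens as they stream."""

    async def on_llm_new_token(self, token: str, **kwargs) -> None:
        print(token, end="", flush=True)


async def main() -> None:
    # Load the vectorstore produced by ingestion ("vectorstore.pkl" is assumed).
    with open("vectorstore.pkl", "rb") as f:
        vectorstore = pickle.load(f)

    qa = get_chain(vectorstore, TokenPrinter(), TokenPrinter())

    # ConversationalRetrievalChain takes "question" and "chat_history";
    # because the chain is built with return_source_documents=True, the
    # result also carries the retrieved docs under "source_documents".
    result = await qa.acall(
        {"question": "How do I ingest documents?", "chat_history": []}
    )
    print("\n\nSources:", [doc.metadata for doc in result["source_documents"]])


if __name__ == "__main__":
    asyncio.run(main())
```

Note that the diff also moves `AsyncCallbackManager` from `langchain.callbacks.base` to `langchain.callbacks.manager`, matching the callbacks refactor in newer LangChain releases, and that `ConversationalRetrievalChain` takes a `retriever` (hence `vectorstore.as_retriever()`) where `ChatVectorDBChain` took a `vectorstore` directly.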