I am trying to provide a custom prompt for doing Q&A in LangChain. I wasn't able to do that with ConversationalRetrievalChain, as it does not allow multiple custom inputs in a custom prompt. I therefore switched to load_qa_chain, but with load_qa_chain I am unable to use memory.
How do I add memory to load_qa_chain, or alternatively, how do I implement ConversationalRetrievalChain with a custom prompt that takes multiple inputs?
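For reference, the LangChain docs show a pattern for adding memory to a multi-input chain built with load_qa_chain. As I understand it (this is a sketch of my understanding, not something I have verified for my case), the memory needs an input_key so it knows which of the several inputs is the human message, and the template needs a {chat_history} placeholder for the memory to render into:

from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

# Sketch adapted from the docs' multi-input memory example
template = """Use the following pieces of context to answer the question at the end.

{context}

{chat_history}
Question: {question}
Answer:"""
prompt = PromptTemplate(input_variables=["chat_history", "question", "context"], template=template)
# input_key tells the memory which of the multiple inputs is the human turn
memory = ConversationBufferMemory(memory_key="chat_history", input_key="question")
chain = load_qa_chain(OpenAI(temperature=0), chain_type="stuff", memory=memory, prompt=prompt)

What I can't tell is whether this keeps working once the prompt also takes my extra customer variables. Here is my current code: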
import json
import langchain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA, ConversationalRetrievalChain, RetrievalQAWithSourcesChain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chains.question_answering import load_qa_chain
from langchain.document_loaders import UnstructuredFileLoader
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
# Load the PDF and split it into chunks
loader = UnstructuredFileLoader("../document.pdf", mode="elements")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)

# Embed the chunks into a Chroma vector store
# embeddings = OpenAIEmbeddings()
embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
vectorDB = Chroma.from_documents(texts, embeddings)
prompt_template = "You are a Chat customer support agent.\
Address the customer as Dear Mr. or Miss. depending on customer's gender followed by Customer's First Name.\
Use the following pieces of context to answer the question at the end.\
If you don't know the answer, just say that you don't know, don't try to make up an answer.\
Below are the details of the customer:\
Customer's Name : {Customer_Name} \
Customer's Resident State: {Customer_State}\
Customer's Gender: {Customer_Gender}\
{context}\
Question: {question}\
Answer: "
# Load the customer's profile
with open('Customer_profile.json', 'r') as openfile:
    json_object = json.load(openfile)

cName = json_object['Customer_Name']
cState = json_object['Customer_State']
cGen = json_object['Customer_Gender']
PROMPT = PromptTemplate(
    template=prompt_template,
    input_variables=["context", "question", "Customer_Name", "Customer_State", "Customer_Gender"],
)
chain_type_kwargs = {"prompt": PROMPT}
memory = ConversationBufferMemory(memory_key="chat_history", output_key="answer", return_messages=True)
# Attempts that did not work for me:
# qa = RetrievalQAWithSourcesChain.from_chain_type(OpenAI(temperature=0), retriever=vectorDB.as_retriever(), chain_type="stuff", memory=memory, return_source_documents=True, chain_type_kwargs=chain_type_kwargs)
# qa = RetrievalQAWithSourcesChain.from_chain_type(OpenAI(temperature=0), retriever=vectorDB.as_retriever(), chain_type="stuff", memory=memory, return_source_documents=True)
# qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), retriever=vectorDB.as_retriever(), chain_type="stuff", memory=memory, return_source_documents=True, chain_type_kwargs=chain_type_kwargs)
# qa = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type="stuff", prompt=PROMPT)
qa = load_qa_chain(OpenAI(temperature=0.1), chain_type="stuff", prompt=PROMPT)
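# Note: `memory` (defined above) is never actually passed to this chain.
# load_qa_chain(..., memory=memory) is what I could not get to work; my guess
# is the memory would also need input_key="question" (to pick the human turn
# out of the multiple inputs) and the template a {chat_history} placeholder.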
langchain.debug = False  # set True to trace the chain's internals

query = "How's the weather in my place?"
# load_qa_chain takes documents directly, so retrieve the relevant chunks manually
docs = vectorDB.similarity_search(query)
result = qa({"input_documents": docs, "question": query,
             "Customer_Name": cName, "Customer_State": cState, "Customer_Gender": cGen})
# result = qa({"question": query})
print(result["output_text"])
Customer_profile.json:
{
    "Customer_Name": "Bob",
    "Customer_State": "NY",
    "Customer_Gender": "Male"
}
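For completeness, the ConversationalRetrievalChain variant I was aiming for looked roughly like the sketch below. My understanding is that the custom prompt goes in combine_docs_chain_kwargs rather than chain_type_kwargs, but whether the extra customer inputs get forwarded through to that prompt is exactly the part I could not get working:

qa = ConversationalRetrievalChain.from_llm(
    OpenAI(temperature=0),
    retriever=vectorDB.as_retriever(),
    memory=memory,
    return_source_documents=True,
    combine_docs_chain_kwargs={"prompt": PROMPT},
)
result = qa({"question": query, "Customer_Name": cName,
             "Customer_State": cState, "Customer_Gender": cGen})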