import os
import tempfile

import streamlit as st
from dotenv import load_dotenv
from streamlit_chat import message
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import CTransformers, Replicate
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.memory import ConversationBufferMemory
from langchain.document_loaders import PyPDFLoader, TextLoader, Docx2txtLoader
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

load_dotenv()
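
# The Replicate LLM configured below reads its API token from the
# REPLICATE_API_TOKEN environment variable, so the .env file loaded above
# would typically contain a single line such as (placeholder value):
#
#   REPLICATE_API_TOKEN=r8_xxxxxxxxxxxxxxxx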

def initialize_session_state():
    """Seed st.session_state on first run: 'history' holds (question, answer)
    tuples for the chain; 'past' and 'generated' hold the user and bot
    messages rendered in the chat UI."""
    if 'history' not in st.session_state:
        st.session_state['history'] = []

    if 'generated' not in st.session_state:
        st.session_state['generated'] = ["Hello! Ask me anything about the files you have uploaded"]

    if 'past' not in st.session_state:
        st.session_state['past'] = ["Hey! 👋"]
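
# Note on the session-state pattern above: Streamlit re-executes the whole
# script on every user interaction, so anything that must survive across
# reruns (chat history, displayed messages) has to live in st.session_state
# rather than in ordinary module-level variables.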

def conversation_chat(query, chain, history):
    """Send one query through the chain and record the exchange in history."""
    result = chain({"question": query, "chat_history": history})
    history.append((query, result["answer"]))
    return result["answer"]

def display_chat_history(chain):
    """Render the question form and the running conversation."""
    reply_container = st.container()
    container = st.container()

    with container:
        with st.form(key='my_form', clear_on_submit=True):
            user_input = st.text_input("Question:", placeholder="Ask about your documents", key='input')
            submit_button = st.form_submit_button(label='Send')

        if submit_button and user_input:
            with st.spinner('Generating response...'):
                output = conversation_chat(user_input, chain, st.session_state['history'])

            st.session_state['past'].append(user_input)
            st.session_state['generated'].append(output)

    if st.session_state['generated']:
        with reply_container:
            for i in range(len(st.session_state['generated'])):
                # Unique keys keep streamlit_chat widgets from colliding on reruns.
                message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="thumbs")
                message(st.session_state["generated"][i], key=str(i), avatar_style="fun-emoji")

def create_conversational_chain(vector_store):
    """Build a retrieval-augmented chat chain on top of the vector store."""
    # Alternative: run Llama 2 locally with CTransformers instead of Replicate.
    # llm = CTransformers(model="llama-2-7b-chat.ggmlv3.q4_0.bin",
    #                     streaming=True,
    #                     callbacks=[StreamingStdOutCallbackHandler()],
    #                     model_type="llama", config={'max_new_tokens': 500, 'temperature': 0.01})

    llm = Replicate(
        streaming=True,
        model="replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
        callbacks=[StreamingStdOutCallbackHandler()],
        input={"temperature": 0.01, "max_length": 500, "top_p": 1})

    # Buffer the full conversation and stuff the top-2 retrieved chunks into
    # each prompt.
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    chain = ConversationalRetrievalChain.from_llm(llm=llm, chain_type='stuff',
                                                  retriever=vector_store.as_retriever(search_kwargs={"k": 2}),
                                                  memory=memory)
    return chain
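
# A minimal way to exercise the chain outside Streamlit, assuming a
# vector_store built as in pdf_reader() below (hypothetical usage, not part
# of the app flow):
#
#   chain = create_conversational_chain(vector_store)
#   reply = chain({"question": "What is this document about?",
#                  "chat_history": []})["answer"]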

def pdf_reader():
    """Streamlit entry point: upload documents, index them, and chat over them."""
    initialize_session_state()

    uploaded_files = st.sidebar.file_uploader("Upload files", accept_multiple_files=True)

    if uploaded_files:
        text = []
        for file in uploaded_files:
            file_extension = os.path.splitext(file.name)[1]
            # The loaders need a path on disk, so persist each upload to a
            # temporary file first.
            with tempfile.NamedTemporaryFile(delete=False) as temp_file:
                temp_file.write(file.read())
                temp_file_path = temp_file.name

            loader = None
            if file_extension == ".pdf":
                loader = PyPDFLoader(temp_file_path)
            elif file_extension in (".docx", ".doc"):
                loader = Docx2txtLoader(temp_file_path)
            elif file_extension == ".txt":
                loader = TextLoader(temp_file_path)

            if loader:
                text.extend(loader.load())
            os.remove(temp_file_path)

        # Split the documents into overlapping chunks for retrieval.
        text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000,
                                              chunk_overlap=100, length_function=len)
        text_chunks = text_splitter.split_documents(text)

        # Embed the chunks on CPU with a small sentence-transformers model.
        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
                                           model_kwargs={'device': 'cpu'})

        # Index the embedded chunks in a FAISS vector store.
        vector_store = FAISS.from_documents(text_chunks, embedding=embeddings)

        # Build the retrieval chat chain and render the UI.
        chain = create_conversational_chain(vector_store)
        display_chat_history(chain)
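
# Streamlit apps are started from the command line rather than with plain
# `python`, e.g.:
#
#   streamlit run pdfbot.py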

if __name__ == "__main__":
    pdf_reader()