-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
156 lines (120 loc) · 6.1 KB
/
app.py
File metadata and controls
156 lines (120 loc) · 6.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
import streamlit as st
import time
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS #facebook AI similarty search
# from langchain.vectorstore import FAISS, Annoy, HNSW, NGT, AnnoyNGT, AnnoyHNSW, HNSWNGT, AnnoyHNSWNGT
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
#models::::::::::
from langchain.chat_models import ChatOpenAI
from langchain.llms import HuggingFaceHub
def extract_pdf_text(pdf_docs):
    """Concatenate the extractable text of every page of every uploaded PDF.

    Args:
        pdf_docs: iterable of file-like objects (e.g. Streamlit UploadedFile)
            that PyPDF2's ``PdfReader`` can open.

    Returns:
        One string containing the text of all pages, in upload/page order.
        Pages with no extractable text contribute nothing.
    """
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() may return None for pages with no text layer
            # (e.g. scanned images); guard so the concatenation never raises.
            text += page.extract_text() or ""
    return text
def get_text_chunks(text):
    """Split raw document text into overlapping chunks for embedding.

    Chunks are newline-separated spans of roughly 1000 characters; each
    chunk re-starts 200 characters before the previous one ended so that
    context spanning a boundary is not lost.

    Args:
        text: the combined raw text of all documents.

    Returns:
        A list of chunk strings.
    """
    splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    return splitter.split_text(text)
def get_vectorstore(text_chunks, model_name='bge'):
    """Embed the text chunks and index them in a FAISS vector store.

    Args:
        text_chunks: list of text chunks to embed.
        model_name: embedding backend — 'openAI', 'Bge', or 'Instructor'
            (matched case-insensitively, so the default 'bge' also works;
            previously the default matched no branch and ``embeddings``
            stayed None, crashing FAISS.from_texts).

    Returns:
        A FAISS vector store built over the chunks.

    Raises:
        ValueError: if ``model_name`` names no known embedding model.
    """
    choice = model_name.lower()
    if choice == "openai":
        # Remote API — needs an internet connection and OPENAI_API_KEY.
        embeddings = OpenAIEmbeddings()
    elif choice == "bge":
        embeddings = HuggingFaceInstructEmbeddings(model_name='BAAI/bge-large-en')
    elif choice == "instructor":
        # Large local model; roughly 2 minutes on a typical test PDF.
        embeddings = HuggingFaceInstructEmbeddings(model_name='hkunlp/instructor-xl')
    else:
        raise ValueError(f"Unknown embedding model: {model_name!r}")
    return FAISS.from_texts(texts=text_chunks, embedding=embeddings)
def get_conversation_chain(vectorstore):
    """Build a conversational retrieval chain over the given vector store.

    Pairs a HuggingFace-hosted flan-t5-xxl LLM with a buffer memory so
    follow-up questions carry the chat history as context.

    Args:
        vectorstore: a vector store exposing ``as_retriever()``.

    Returns:
        A ``ConversationalRetrievalChain`` ready to answer questions.
    """
    # Hosted model; a ChatOpenAI() LLM would be a drop-in alternative here.
    language_model = HuggingFaceHub(
        repo_id="google/flan-t5-xxl",
        model_kwargs={"temperature": 0.5, "max_length": 512},
    )
    chat_memory = ConversationBufferMemory(
        memory_key='chat_history',
        return_messages=True,
    )
    return ConversationalRetrievalChain.from_llm(
        llm=language_model,
        retriever=vectorstore.as_retriever(),
        memory=chat_memory,
    )
def handle_userinput(user_question):
    """Run the question through the conversation chain and render the chat.

    Requires ``st.session_state.conversation`` to have been initialised by a
    prior 'Process' run; the chain's memory is updated as a side effect.

    Args:
        user_question: the question typed into the chat input.
    """
    response = st.session_state.conversation({'question': user_question})
    st.session_state.chat_history = response['chat_history']
    for i, message in enumerate(st.session_state.chat_history):
        # ConversationBufferMemory stores messages alternating
        # [Human, AI, Human, AI, ...], so EVEN indices are the user's
        # turns. (The original code had the parity inverted and showed
        # the assistant's replies under the "user" avatar.)
        if i % 2 == 0:
            with st.chat_message("user"):
                st.write(message.content)
        else:
            with st.chat_message("Vivek"):
                st.write(message.content)
def main():
    """Entry point for the PDFQuestPro Streamlit app.

    Lays out the chat page, accepts PDF uploads in the sidebar, and wires
    the processing pipeline (extract -> chunk -> embed -> chain) into
    Streamlit's session state so it survives reruns.
    """
    load_dotenv()
    st.set_page_config(page_title='PDFQuestPro', page_icon=':books:', layout='wide')

    # Seed both session-state keys on the first run; later reruns keep them.
    st.session_state.setdefault("conversation", None)
    st.session_state.setdefault("chat_history", None)

    st.header('PDF.Quest-Pro :books:')
    st.markdown('created by Vivek Patidar')

    user_question = st.chat_input("Ask question about the PDFs")
    if user_question:
        handle_userinput(user_question)

    with st.sidebar:
        st.subheader('Your Documents')
        pdf_docs = st.file_uploader('Upload your PDFs here :envelope_with_arrow:', type=['pdf'], accept_multiple_files=True)
        embedding_model = st.selectbox('choose a embedding model', ('openAI', 'Bge', 'Instructor'))
        if st.button('Process :bow_and_arrow:'):
            with st.spinner("Processing"):
                # 1) Pull the raw text out of every uploaded PDF.
                raw_text = extract_pdf_text(pdf_docs)
                with st.chat_message("user"):
                    st.write(f"length of combined text:{len(raw_text)}")

                # 2) Split into overlapping chunks for embedding.
                text_chunks = get_text_chunks(raw_text)

                # 3) Embed + index the chunks, timing the build.
                started = time.time()
                vectorstore = get_vectorstore(text_chunks, model_name=embedding_model)
                vectorstore_time = time.time() - started
                with st.chat_message("user"):
                    st.write(f"Time taken to create vectorstore :{vectorstore_time: .0f} seconds")

                # 4) Build the retrieval chain and stash it in session state
                #    so handle_userinput() can use it on later reruns.
                started = time.time()
                st.session_state.conversation = get_conversation_chain(vectorstore)
                conversation_chain_time = time.time() - started
                with st.chat_message("user"):
                    st.write(f"Time taken to create conversation chain :{conversation_chain_time: .0f} seconds")
# Standard script entry point: launch the Streamlit app when run directly
# (e.g. via `streamlit run app.py`), but not when imported as a module.
if __name__ == '__main__':
    main()