-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp_local_rag.py
More file actions
87 lines (71 loc) · 3.17 KB
/
app_local_rag.py
File metadata and controls
87 lines (71 loc) · 3.17 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import streamlit as st
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader, TextLoader
from langchain.chains import RetrievalQA
from langchain_community.llms import Ollama
import os
import tempfile
st.set_page_config(page_title="🧠 Local RAG - Chat with Document", layout="wide")
st.title("🧠 Chat with Your Document (Local RAG - No OpenAI)")

# Persist the FAISS index across Streamlit reruns.
if "vectorstore" not in st.session_state:
    st.session_state.vectorstore = None

uploaded_file = st.file_uploader("Upload a PDF or TXT file", type=["pdf", "txt"])

if uploaded_file:
    with st.spinner("Processing and indexing your document..."):
        # The loaders need a real filesystem path, so spill the upload to a
        # temp file first. delete=False keeps it alive after the `with` block;
        # we remove it ourselves in the `finally` below (the original leaked it).
        with tempfile.NamedTemporaryFile(
            delete=False, suffix=uploaded_file.name
        ) as tmp_file:
            tmp_file.write(uploaded_file.getvalue())
            tmp_path = tmp_file.name

        try:
            # Choose the loader by extension (the uploader only allows pdf/txt).
            if uploaded_file.name.endswith(".pdf"):
                loader = PyPDFLoader(tmp_path)
            else:
                loader = TextLoader(tmp_path)
            documents = loader.load()

            # Split into overlapping chunks so retrieval can return focused passages.
            splitter = RecursiveCharacterTextSplitter(
                chunk_size=1000, chunk_overlap=200
            )
            chunks = splitter.split_documents(documents)
            st.write(f"📝 Loaded {len(documents)} document(s)")
            st.write(f"🧩 Generated {len(chunks)} chunks")

            # Local sentence-transformers embeddings — no OpenAI key required.
            embed_model = HuggingFaceEmbeddings(
                model_name="sentence-transformers/all-MiniLM-L6-v2"
            )

            if not chunks:
                st.error(
                    "No document chunks to index. Please check the uploaded file or chunking logic."
                )
                st.stop()  # ✅ Stop the app to avoid using undefined vectorstore
            else:
                # Bug fix: the original repeated these two statements twice
                # (duplicate session_state assignment + duplicate success toast).
                vectorstore = FAISS.from_documents(chunks, embed_model)
                st.session_state.vectorstore = vectorstore
                st.success("✅ Document embedded and indexed!")
        finally:
            # delete=False means nothing removes the temp file for us.
            os.unlink(tmp_path)
# Question-answering UI — only rendered once a document has been indexed.
if st.session_state.vectorstore:
    query = st.text_input("💬 Ask something about your document:")
    if query:
        with st.spinner("💡 Generating answer..."):
            # Local LLM served by Ollama (e.g., mistral) — no cloud API.
            llm = Ollama(model="mistral")

            # Stuff retrieved chunks into a standard RetrievalQA chain.
            qa_chain = RetrievalQA.from_chain_type(
                llm=llm,
                retriever=st.session_state.vectorstore.as_retriever(),
                return_source_documents=True,
            )
            result = qa_chain.invoke({"query": query})

            st.markdown("### 🧠 Answer:")
            st.write(result["result"])

            # Let the user inspect which chunks backed the answer.
            with st.expander("📄 Sources used"):
                for idx, src_doc in enumerate(result["source_documents"], start=1):
                    st.markdown(f"**Source #{idx}**:")
                    st.write(src_doc.page_content[:500])