Skip to content

Commit c53eb08

Browse files
committed
implemented RAG-based chat feature to let users ask questions about the article
1 parent db0520c commit c53eb08

7 files changed

Lines changed: 115 additions & 18 deletions

File tree

backend/app/modules/chat/__init__.py

Whitespace-only changes.
Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
# Sentence-embedding helper for the chat module.
from sentence_transformers import SentenceTransformer

# Loaded once at import time so every query reuses the same model instance.
embedder = SentenceTransformer("all-MiniLM-L6-v2")


def embed_query(query: str):
    """Encode *query* into a dense vector, returned as a plain Python list."""
    vector = embedder.encode(query)
    return vector.tolist()
Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
from pinecone import Pinecone
from dotenv import load_dotenv
from app.modules.chat.embed_query import embed_query
import os

# Load PINECONE_API_KEY (and any other secrets) from a local .env file.
load_dotenv()

# Module-level Pinecone client and index handle, created once at import time
# so each request reuses the same connection.
pc = Pinecone(os.getenv("PINECONE_API_KEY"))
index = pc.Index("perspective")
10+
11+
12+
def search_pinecone(query: str, top_k: int = 5, namespace: str = "default"):
    """Embed *query* and return its nearest matches from the Pinecone index.

    Args:
        query: Natural-language question to search for.
        top_k: Maximum number of matches to return.
        namespace: Pinecone namespace to search. Defaults to "default"
            (previously hard-coded; now a parameter so callers can target
            other namespaces without changing this module).

    Returns:
        A list of plain dicts with each match's ``id``, similarity
        ``score`` and stored ``metadata`` — JSON-serializable for the API.
    """
    embeddings = embed_query(query)
    results = index.query(
        vector=embeddings,
        top_k=top_k,
        include_metadata=True,
        namespace=namespace,
    )
    # Flatten the Pinecone response objects into simple dicts.
    return [
        {
            "id": match["id"],
            "score": match["score"],
            "metadata": match["metadata"],
        }
        for match in results["matches"]
    ]
Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
import os
from groq import Groq
from dotenv import load_dotenv

# Load GROQ_API_KEY from a local .env file, if present.
load_dotenv()

# Shared Groq client, created once at import time and reused per request.
client = Groq(api_key=os.getenv("GROQ_API_KEY"))
8+
9+
10+
def build_context(docs):
    """Join retrieved snippets into one newline-separated context string.

    Each match's metadata contributes its "explanation" text, falling back
    to "reasoning" (or the empty string) when no explanation is stored.
    """
    parts = []
    for match in docs:
        meta = match["metadata"]
        text = meta.get("explanation") or meta.get("reasoning", "")
        # f-string keeps the original behavior of stringifying non-str values.
        parts.append(f"{text}")
    return "\n".join(parts)
13+
14+
15+
def ask_llm(question, docs):
    """Answer *question* with the Groq LLM, grounded in the retrieved *docs*.

    Args:
        question: The user's natural-language question.
        docs: Retrieval matches (as returned by the vector search) whose
            metadata supplies the grounding context.

    Returns:
        The model's answer as a string.
    """
    context = build_context(docs)
    # (debug print of the full context removed — it leaked retrieved
    # article content to stdout on every request)
    prompt = f"""You are an assistant that answers based on context.

Context:
{context}

Question:
{question}
"""
    response = client.chat.completions.create(
        model="gemma2-9b-it",
        messages=[
            {"role": "system", "content": "Use only the context to answer."},
            {"role": "user", "content": prompt}
        ]
    )
    return response.choices[0].message.content

backend/app/modules/vector_store/embed.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,3 +28,4 @@ def embed_chunks(chunks: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
2828
"metadata": chunk["metadata"]
2929
})
3030
return vectors
31+

backend/app/routes/routes.py

Lines changed: 23 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,31 +3,49 @@
33
from app.modules.pipeline import run_scraper_pipeline
44
from app.modules.pipeline import run_langgraph_workflow
55
from app.modules.bias_detection.check_bias import check_bias
6+
from app.modules.chat.get_rag_data import search_pinecone
7+
from app.modules.chat.llm_processing import ask_llm
68
import asyncio
79
import json
810

911
router = APIRouter()
1012

13+
1114
# Request body carrying the article URL to scrape/analyze.
# NOTE(review): class name has a capitalization typo ("URlRequest" vs
# "UrlRequest"); kept as-is because renaming would change the module's
# public interface for existing importers.
class URlRequest(BaseModel):
    url: str
1316

1417

18+
# Request body for the /chat endpoint: the user's question about the article.
class ChatQuery(BaseModel):
    message: str
20+
21+
1522
@router.get("/")
async def home():
    """Health-check endpoint confirming the API is running."""
    return {"message": "Perspective API is live!"}
1825

26+
1927
@router.post("/bias")
async def bias_detection(request: URlRequest):
    """Scrape the article at the given URL and return its bias score.

    The scraper and bias-check pipelines are blocking, so they run in
    worker threads via ``asyncio.to_thread`` to keep the event loop free.
    """
    content = await asyncio.to_thread(run_scraper_pipeline, request.url)
    bias_score = await asyncio.to_thread(check_bias, content)
    # (debug print of the score removed)
    return bias_score
25-
2633

2734

2835
@router.post("/process")
async def run_pipelines(request: URlRequest):
    """Scrape the article and run the full LangGraph analysis workflow.

    Both pipeline stages are blocking, so each runs in a worker thread.
    """
    article_text = await asyncio.to_thread(run_scraper_pipeline, request.url)
    # (debug dump of the full article via json.dumps removed)
    data = await asyncio.to_thread(run_langgraph_workflow, article_text)
    return data
41+
42+
43+
@router.post("/chat")
async def answer_query(request: ChatQuery):
    """Answer a user question about the article via RAG.

    Retrieval (Pinecone) and generation (Groq) are blocking network I/O,
    so both run in worker threads — matching the other routes in this
    file — instead of stalling the event loop.
    """
    query = request.message
    results = await asyncio.to_thread(search_pinecone, query)
    answer = await asyncio.to_thread(ask_llm, query, results)
    # (debug print of the answer removed)
    return {"answer": answer}

frontend/app/analyze/results/page.tsx

Lines changed: 15 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ import {
2020
import { Input } from "@/components/ui/input";
2121
import { Badge } from "@/components/ui/badge";
2222
import BiasMeter from "@/components/bias-meter";
23+
import axios from "axios";
2324

2425

2526
/**
@@ -79,23 +80,24 @@ export default function AnalyzePage() {
7980

8081
}, [router]);
8182

82-
const handleSendMessage = (e: React.FormEvent) => {
83+
async function handleSendMessage(e: React.FormEvent){
8384
e.preventDefault();
8485
if (!message.trim()) return;
8586
const newMessages = [...messages, { role: "user", content: message }];
8687
setMessages(newMessages);
8788
setMessage("");
88-
setTimeout(() => {
89-
setMessages([
90-
...newMessages,
91-
{
92-
role: "system",
93-
content:
94-
"Based on the article... let me know if you want more details.",
95-
},
96-
]);
97-
}, 1000);
98-
};
89+
90+
const res = await axios.post("http://Thunder1245-perspective-backend.hf.space/api/chat", {
91+
message: message
92+
});
93+
const data = res.data;
94+
95+
console.log(data)
96+
97+
// 🔹 Step 2: Append LLM’s response
98+
setMessages([...newMessages, { role: "assistant", content: data.answer }]);
99+
};
100+
99101

100102
if (isLoading || !analysisData || !biasScore) {
101103
return (
@@ -113,7 +115,7 @@ export default function AnalyzePage() {
113115
score,
114116
} = analysisData;
115117

116-
return (
118+
return(
117119
<div className="flex flex-col min-h-screen">
118120
{/* Header omitted for brevity */}
119121
<main className="flex-1 pt-16 container mx-auto px-4">

0 commit comments

Comments
 (0)