Skip to content

Commit ba87804

Browse files
authored
Merge pull request #113 from AOSSIE-Org/feat/chat
Feat/ RAG chat endpoint + Pinecone metadata fix
2 parents 38799a8 + a8cc25d commit ba87804

9 files changed

Lines changed: 176 additions & 46 deletions

File tree

.github/workflows/deploy-backend-to-hf.yml

Lines changed: 26 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -3,57 +3,67 @@ name: 🚀 Deploy Backend to HF Space
33
on:
44
push:
55
branches:
6-
- main # or your primary branch
6+
- main
77
paths:
8-
- "backend/**" # only trigger when anything under backend/ changes
8+
- "backend/**"
99

1010
jobs:
1111
deploy:
1212
runs-on: ubuntu-latest
13+
# set your HF username here (or replace with a secret if you prefer)
14+
env:
15+
HF_USER: Thunder1245
16+
HF_REPO: perspective-backend
17+
1318
steps:
1419
- name: 👉 Checkout code
1520
uses: actions/checkout@v4
1621
with:
1722
fetch-depth: 0
1823

19-
- name: 🔒 Install HF CLI
20-
run: pip install huggingface_hub
21-
22-
- name: 🔑 HF login
23-
env:
24-
HF_TOKEN: ${{ secrets.HF_TOKEN }}
25-
run: huggingface-cli login --token "$HF_TOKEN"
24+
- name: 🔍 Ensure HF_TOKEN is set
25+
run: |
26+
if [ -z "${{ secrets.HF_TOKEN }}" ]; then
27+
echo "ERROR: HF_TOKEN secret is not set. Add it in repository secrets: Settings → Secrets & variables → Actions."
28+
exit 1
29+
fi
2630
27-
- name: 📂 Prepare Space repo
31+
- name: 📂 Prepare Space repo (clone)
2832
env:
2933
HF_TOKEN: ${{ secrets.HF_TOKEN }}
3034
run: |
31-
rm -rf space-backend
35+
rm -rf space-backend || true
36+
# clone using token in URL (this authenticates the clone)
3237
git clone https://Thunder1245:${HF_TOKEN}@huggingface.co/spaces/Thunder1245/perspective-backend.git space-backend
3338
3439
- name: 📦 Install rsync
3540
run: |
3641
sudo apt-get update
3742
sudo apt-get install -y rsync
3843
39-
- name: 📤 Sync backend code
44+
- name: 📤 Sync backend code to Space
4045
env:
4146
HF_TOKEN: ${{ secrets.HF_TOKEN }}
4247
run: |
48+
set -e
49+
4350
cd space-backend
4451
45-
# Only remove tracked files (preserve .git and config)
52+
# Remove tracked files while preserving .git and config (ignore failure)
4653
git rm -r . || true
4754
cd ..
4855
49-
# Copy new backend files in
56+
# Copy backend files into the cloned space directory
5057
cp -R backend/. space-backend/
5158
52-
# Push new code to HF Space
59+
# Commit & push
5360
cd space-backend
5461
git config user.name "github-actions[bot]"
5562
git config user.email "github-actions[bot]@users.noreply.github.com"
5663
git add --all
57-
git commit -m "Autodeploy backend: ${{ github.sha }}" || echo "No changes to commit"
64+
git commit -m "Auto-deploy backend: ${{ github.sha }}" || echo "No changes to commit"
5865
git push origin main
5966
67+
- name: ✅ Done
68+
run: |
69+
echo "Backend deployed to Hugging Face Space: https://huggingface.co/spaces/${HF_USER}/${HF_REPO}"

README.md

Lines changed: 35 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -2,15 +2,32 @@
22
![Perspective banner](frontend/public/perspective_banner.jpg)
33

44
### Table of Contents
5-
- [System Overview](#system-overview)
6-
- [Architecture Components](#architecture-components)
7-
- [Technical Stack](#technical-stack)
8-
- [Core Features](#core-features)
9-
- [Data Flow & Security](#data-flow--security)
10-
- [Setup & Deployment](#setup--deployment)
11-
- [Detailed Architecture Diagram](#detailed-architecture-diagram)
12-
- [Expected Outcomes](#expected-outcomes)
13-
- [Required Skills](#required-skills)
5+
- [Perspective-AI](#perspective-ai)
6+
- [Table of Contents](#table-of-contents)
7+
- [System Overview](#system-overview)
8+
- [High-Level Concept](#high-level-concept)
9+
- [Architecture Components](#architecture-components)
10+
- [1. Frontend Layer](#1-frontend-layer)
11+
- [3. Core Backend](#3-core-backend)
12+
- [4. AI \& NLP Integration](#4-ai--nlp-integration)
13+
- [5. Data Storage](#5-data-storage)
14+
- [Technical Stack](#technical-stack)
15+
- [Frontend Technologies](#frontend-technologies)
16+
- [Backend Technologies](#backend-technologies)
17+
  - [AI Integration](#ai-integration)
18+
- [Core Features](#core-features)
19+
- [1. Counter-Perspective Generation](#1-counter-perspective-generation)
20+
- [2. Reasoned Thinking](#2-reasoned-thinking)
21+
- [3. Updated Facts](#3-updated-facts)
22+
- [4. Seamless Integration](#4-seamless-integration)
23+
- [5. Real-Time Analysis](#5-real-time-analysis)
24+
- [Data Flow \& Security](#data-flow--security)
25+
- [Setup \& Deployment](#setup--deployment)
26+
- [Frontend Setup](#frontend-setup)
27+
- [Backend Setup](#backend-setup)
28+
- [Architecture Diagram](#architecture-diagram)
29+
- [Expected Outcomes](#expected-outcomes)
30+
- [Required Skills](#required-skills)
1431

1532
---
1633

@@ -137,20 +154,25 @@ npm run dev
137154
- add .env file in `/new-backend`directory.
138155
- add following environment variable in your .env file.
139156
```
140-
HF_TOKEN = <Your_hugging_face_access_token>
157+
GROQ_API_KEY= <groq_api_key>
158+
PINECONE_API_KEY = <your_pinecone_API_KEY>
159+
PORT = 8000
160+
SEARCH_KEY = <your_Google_custom_search_engine_API_key>
141161
```
142162

143163
*Run backend:*
144164
```bash
145-
cd new-backend
165+
cd backend
146166
uv sync # Creating virtual environment at: .venv
147167
uv run main.py #Runs the backend server
148168
```
149169

150170
---
151171

172+
152173
## Architecture Diagram
153174

175+
154176
```mermaid
155177
graph TB
156178
%% Define Subgraphs with Colors and Text Styles
@@ -168,6 +190,7 @@ graph TB
168190
Analyzer[Content Analyzer]
169191
CNEngine[Counter-Narrative Engine]
170192
Context[Context Manager]
193+
171194
end
172195
173196
subgraph AI & NLP Layer
@@ -212,7 +235,7 @@ graph TB
212235

213236
## Required Skills
214237

215-
- **Frontend Development**: Experience with Next.js and modern UI frameworks.
238+
- **Frontend Development**: Experience with Next.js and modern UI frameworks.
216239
- **Backend Development**: Proficiency in Python and FastAPI.
217240
- **AI & NLP**: Familiarity with LangChain, Langgraph, and prompt engineering techniques.
218241
- **Database Management**: Knowledge of vector databases system.

backend/app/modules/chat/__init__.py

Whitespace-only changes.
Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
from sentence_transformers import SentenceTransformer
2+
3+
embedder = SentenceTransformer("all-MiniLM-L6-v2")
4+
5+
6+
def embed_query(query: str):
7+
8+
embeddings = embedder.encode(query).tolist()
9+
10+
return embeddings
Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
from pinecone import Pinecone
2+
from dotenv import load_dotenv
3+
from app.modules.chat.embed_query import embed_query
4+
import os
5+
6+
load_dotenv()
7+
8+
pc = Pinecone(os.getenv("PINECONE_API_KEY"))
9+
index = pc.Index("perspective")
10+
11+
12+
def search_pinecone(query: str, top_k: int = 5):
13+
14+
embeddings = embed_query(query)
15+
16+
results = index.query(
17+
vector=embeddings,
18+
top_k=top_k,
19+
include_metadata=True,
20+
namespace="default"
21+
22+
)
23+
24+
matches = []
25+
for match in results["matches"]:
26+
matches.append({
27+
"id": match["id"],
28+
"score": match["score"],
29+
"metadata": match["metadata"]
30+
})
31+
return matches
Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
import os
2+
from groq import Groq
3+
from dotenv import load_dotenv
4+
5+
load_dotenv()
6+
7+
client = Groq(api_key=os.getenv("GROQ_API_KEY"))
8+
9+
10+
def build_context(docs):
11+
12+
return "\n".join(f"{m['metadata'].get('explanation') or m['metadata'].get('reasoning', '')}"for m in docs)
13+
14+
15+
def ask_llm(question, docs):
16+
context = build_context(docs)
17+
print(context)
18+
prompt = f"""You are an assistant that answers based on context.
19+
20+
Context:
21+
{context}
22+
23+
Question:
24+
{question}
25+
"""
26+
27+
response = client.chat.completions.create(
28+
model="gemma2-9b-it",
29+
messages=[
30+
{"role": "system", "content": "Use only the context to answer."},
31+
{"role": "user", "content": prompt}
32+
]
33+
)
34+
35+
return response.choices[0].message.content

backend/app/modules/vector_store/embed.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,3 +28,4 @@ def embed_chunks(chunks: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
2828
"metadata": chunk["metadata"]
2929
})
3030
return vectors
31+

backend/app/routes/routes.py

Lines changed: 23 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,31 +3,49 @@
33
from app.modules.pipeline import run_scraper_pipeline
44
from app.modules.pipeline import run_langgraph_workflow
55
from app.modules.bias_detection.check_bias import check_bias
6+
from app.modules.chat.get_rag_data import search_pinecone
7+
from app.modules.chat.llm_processing import ask_llm
68
import asyncio
79
import json
810

911
router = APIRouter()
1012

13+
1114
class URlRequest(BaseModel):
1215
url: str
1316

1417

18+
class ChatQuery(BaseModel):
19+
message: str
20+
21+
1522
@router.get("/")
1623
async def home():
1724
return {"message": "Perspective API is live!"}
1825

26+
1927
@router.post("/bias")
2028
async def bias_detection(request: URlRequest):
21-
content = await asyncio.to_thread(run_scraper_pipeline,(request.url))
22-
bias_score = await asyncio.to_thread(check_bias,(content))
29+
content = await asyncio.to_thread(run_scraper_pipeline, (request.url))
30+
bias_score = await asyncio.to_thread(check_bias, (content))
2331
print(bias_score)
2432
return bias_score
25-
2633

2734

2835
@router.post("/process")
2936
async def run_pipelines(request: URlRequest):
30-
article_text = await asyncio.to_thread(run_scraper_pipeline,(request.url))
37+
article_text = await asyncio.to_thread(run_scraper_pipeline, (request.url))
3138
print(json.dumps(article_text, indent=2))
32-
data = await asyncio.to_thread(run_langgraph_workflow,(article_text))
39+
data = await asyncio.to_thread(run_langgraph_workflow, (article_text))
3340
return data
41+
42+
43+
@router.post("/chat")
44+
async def answer_query(request: ChatQuery):
45+
46+
query = request.message
47+
results = search_pinecone(query)
48+
answer = ask_llm(query, results)
49+
print(answer)
50+
51+
return {"answer": answer}

frontend/app/analyze/results/page.tsx

Lines changed: 15 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ import {
2020
import { Input } from "@/components/ui/input";
2121
import { Badge } from "@/components/ui/badge";
2222
import BiasMeter from "@/components/bias-meter";
23+
import axios from "axios";
2324

2425

2526
/**
@@ -79,23 +80,24 @@ export default function AnalyzePage() {
7980

8081
}, [router]);
8182

82-
const handleSendMessage = (e: React.FormEvent) => {
83+
async function handleSendMessage(e: React.FormEvent){
8384
e.preventDefault();
8485
if (!message.trim()) return;
8586
const newMessages = [...messages, { role: "user", content: message }];
8687
setMessages(newMessages);
8788
setMessage("");
88-
setTimeout(() => {
89-
setMessages([
90-
...newMessages,
91-
{
92-
role: "system",
93-
content:
94-
"Based on the article... let me know if you want more details.",
95-
},
96-
]);
97-
}, 1000);
98-
};
89+
90+
const res = await axios.post("http://Thunder1245-perspective-backend.hf.space/api/chat", {
91+
message: message
92+
});
93+
const data = res.data;
94+
95+
console.log(data)
96+
97+
// 🔹 Step 2: Append LLM’s response
98+
setMessages([...newMessages, { role: "assistant", content: data.answer }]);
99+
};
100+
99101

100102
if (isLoading || !analysisData || !biasScore) {
101103
return (
@@ -113,7 +115,7 @@ export default function AnalyzePage() {
113115
score,
114116
} = analysisData;
115117

116-
return (
118+
return(
117119
<div className="flex flex-col min-h-screen">
118120
{/* Header omitted for brevity */}
119121
<main className="flex-1 pt-16 container mx-auto px-4">

0 commit comments

Comments
 (0)