Skip to content

Commit 1b69fb1

Browse files
committed
fix: unify LLM model usage, env-based API URLs, and improve logging/search error handling
1 parent 68a64c3 commit 1b69fb1

10 files changed

Lines changed: 50 additions & 32 deletions

File tree

backend/app/logging/logging_config.py

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,14 +24,20 @@ def setup_logger(name: str) -> logging.Logger:
2424
datefmt="%Y-%m-%d %H:%M:%S"
2525
)
2626

27-
# Console Handler
27+
# Console Handler with UTF-8 encoding for Windows support
2828
console_handler = logging.StreamHandler(sys.stdout)
2929
console_handler.setLevel(logging.INFO)
3030
console_handler.setFormatter(formatter)
31+
# Enable UTF-8 encoding to handle emoji and special characters on Windows
32+
if hasattr(console_handler.stream, 'reconfigure'):
33+
try:
34+
console_handler.stream.reconfigure(encoding='utf-8')
35+
except (AttributeError, ValueError):
36+
pass
3137
logger.addHandler(console_handler)
3238

33-
# File Handler
34-
file_handler = logging.FileHandler("app.log")
39+
# File Handler with UTF-8 encoding
40+
file_handler = logging.FileHandler("app.log", encoding="utf-8")
3541
file_handler.setLevel(logging.DEBUG) # Keep detailed logs in file
3642
file_handler.setFormatter(formatter)
3743
logger.addHandler(file_handler)

backend/app/modules/bias_detection/check_bias.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ def check_bias(text):
6161
"content": (f"Give bias score to the following article \n\n{text}"),
6262
},
6363
],
64-
model="gemma2-9b-it",
64+
model="llama-3.1-8b-instant",
6565
temperature=0.3,
6666
max_tokens=512,
6767
)

backend/app/modules/chat/llm_processing.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ def ask_llm(question, docs):
5555
"""
5656

5757
response = client.chat.completions.create(
58-
model="gemma2-9b-it",
58+
model="llama-3.1-8b-instant",
5959
messages=[
6060
{"role": "system", "content": "Use only the context to answer."},
6161
{"role": "user", "content": prompt},

backend/app/modules/facts_check/llm_processing.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ def run_claim_extractor_sdk(state):
6363
),
6464
},
6565
],
66-
model="gemma2-9b-it",
66+
model="llama-3.1-8b-instant",
6767
temperature=0.3,
6868
max_tokens=512,
6969
)
@@ -128,7 +128,7 @@ def run_fact_verifier_sdk(search_results):
128128
),
129129
},
130130
],
131-
model="gemma2-9b-it",
131+
model="llama-3.1-8b-instant",
132132
temperature=0.3,
133133
max_tokens=256,
134134
)

backend/app/modules/facts_check/web_search.py

Lines changed: 21 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -28,15 +28,24 @@
2828

2929

3030
def search_google(query):
    """Query the Google Custom Search API and return the top result.

    Args:
        query: Free-text search string (may contain spaces or special
            characters; it is URL-encoded by ``requests``).

    Returns:
        A single-element list containing a dict with ``title``, ``link``
        and ``snippet`` keys for the first search hit.

    Raises:
        ValueError: If the API response has no ``items`` list — Google
            returns an ``error`` object instead on quota exhaustion, a bad
            key, or zero results.
        requests.RequestException: On network failure or timeout.
    """
    try:
        # Pass the query via `params` so requests URL-encodes it; raw
        # f-string interpolation broke queries containing &, #, + or ?.
        # The timeout keeps a stalled request from hanging the pipeline.
        response = requests.get(
            "https://www.googleapis.com/customsearch/v1",
            params={"key": GOOGLE_SEARCH, "cx": "f637ab77b5d8b4a3c", "q": query},
            timeout=10,
        )
        res = response.json()

        # On failure Google omits 'items' and supplies an 'error' object;
        # 'items' is also absent when the search matched nothing.
        if "items" not in res:
            error_msg = res.get("error", {}).get("message", "Unknown error")
            raise ValueError(f"Google API Error: {error_msg}")

        top = res["items"][0]
        first = {
            "title": top["title"],
            "link": top["link"],
            "snippet": top["snippet"],
        }
        return [first]
    except Exception as e:
        # Log and re-raise so the caller's per-claim error handling runs.
        print(f"Search Google Error: {e}")
        raise

backend/app/modules/langgraph_nodes/judge.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424

2525
# Init once
2626
groq_llm = ChatGroq(
27-
model="gemma2-9b-it",
27+
model="llama-3.1-8b-instant",
2828
temperature=0.0,
2929
max_tokens=10,
3030
)

backend/app/modules/langgraph_nodes/sentiment.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ def run_sentiment_sdk(state):
4949
),
5050
},
5151
],
52-
model="gemma2-9b-it",
52+
model="llama-3.1-8b-instant",
5353
temperature=0.2,
5454
max_tokens=3,
5555
)

backend/app/utils/fact_check_utils.py

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -45,36 +45,39 @@ def run_fact_check_pipeline(state):
4545
result = run_claim_extractor_sdk(state)
4646

4747
if state.get("status") != "success":
48-
logger.error("Claim extraction failed.")
48+
logger.error("Claim extraction failed.")
4949
return [], "Claim extraction failed."
5050

5151
# Step 1: Extract claims
5252
raw_output = result.get("verifiable_claims", "")
5353
claims = re.findall(r"^[\*\-•]\s+(.*)", raw_output, re.MULTILINE)
5454
claims = [claim.strip() for claim in claims if claim.strip()]
55-
logger.info(f"🧠 Extracted claims: {claims}")
55+
logger.info(f"Extracted claims: {claims}")
5656

5757
if not claims:
5858
return [], "No verifiable claims found."
5959

6060
# Step 2: Search each claim with polite delay
6161
search_results = []
6262
for claim in claims:
63-
logger.info(f"\n🔍 Searching for claim: {claim}")
63+
logger.info(f"Searching for claim: {claim}")
6464
try:
6565
results = search_google(claim)
6666
if results:
6767
results[0]["claim"] = claim
6868
search_results.append(results[0])
69-
logger.info(f"Found result: {results[0]['title']}")
69+
logger.info(f"Found result: {results[0]['title']}")
7070
else:
71-
logger.warning(f"⚠️ No search result for: {claim}")
71+
logger.warning(f"No search result for: {claim}")
7272
except Exception as e:
73-
logger.error(f"Search failed for: {claim} -> {e}")
73+
logger.error(f"Search failed for: {claim} -> {e}")
7474

7575
if not search_results:
76+
logger.error("All claim searches failed or returned no results.")
7677
return [], "All claim searches failed or returned no results."
7778

7879
# Step 3: Verify facts using LLM
80+
logger.info(f"Verifying {len(search_results)} claims using LLM...")
7981
final = run_fact_verifier_sdk(search_results)
82+
logger.info("Fact-checking pipeline completed successfully.")
8083
return final.get("verifications", []), None

frontend/app/analyze/loading/page.tsx

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ import {
1616
import ThemeToggle from "@/components/theme-toggle";
1717
import axios from "axios";
1818

19-
// const backend_url = process.env.NEXT_PUBLIC_API_URL;
19+
const backend_url = process.env.NEXT_PUBLIC_API_URL;
2020

2121

2222

@@ -74,10 +74,10 @@ export default function LoadingPage() {
7474

7575
try {
7676
const [processRes, biasRes] = await Promise.all([
77-
axios.post("https://thunder1245-perspective-backend.hf.space/api/process", {
77+
axios.post(`${backend_url}/api/process`, {
7878
url: storedUrl,
7979
}),
80-
axios.post("https://thunder1245-perspective-backend.hf.space/api/bias", {
80+
axios.post(`${backend_url}/api/bias`, {
8181
url: storedUrl,
8282
}),
8383
]);

frontend/app/analyze/results/page.tsx

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ import { Badge } from "@/components/ui/badge";
1919
import BiasMeter from "@/components/bias-meter";
2020
import axios from "axios";
2121

22-
// const backend_url = process.env.NEXT_PUBLIC_API_URL;
22+
const backend_url = process.env.NEXT_PUBLIC_API_URL;
2323

2424
/**
2525
* Renders the article analysis page with summary, perspectives, fact checks, bias meter, AI chat, and sources.
@@ -85,7 +85,7 @@ export default function AnalyzePage() {
8585
setMessages(newMessages);
8686
setMessage("");
8787

88-
const res = await axios.post("https://thunder1245-perspective-backend.hf.space/api/chat", {
88+
const res = await axios.post(`${backend_url}/api/chat`, {
8989
message: message,
9090
});
9191
const data = res.data;

0 commit comments

Comments (0)