@@ -102,9 +102,7 @@ def append_debug(msg):
102102 st .session_state .debug = st .session_state .debug [- 200 :]
103103
104104def update_chat_history (user_message , bot_reply , intent ):
105- # chitchat is NOT added to medical history
106- if intent .lower () != "chitchat" :
107- st .session_state .history_pairs .append ((user_message , bot_reply , intent ))
105+ st .session_state .history_pairs .append ((user_message , bot_reply , intent ))
108106 # if we exceed MAX_VERBATIM_PAIRS, summarize older
109107 if len (st .session_state .history_pairs ) > MAX_VERBATIM_PAIRS :
110108 # old entries to compress
@@ -486,55 +484,71 @@ def handle_chitchat(user_message, chat_history):
486484 return "Whoa, let’s keep it polite, please! 😊"
487485
488486# -------------------- MAIN PIPELINE (called by UI) --------------------
def medical_pipeline(user_message: str):
    """Run one user turn through the full hybrid-RAG pipeline.

    Steps: classify the message, reformulate it against the chat context,
    short-circuit chitchat, otherwise retrieve -> judge -> synthesize.

    Parameters
    ----------
    user_message : str
        The raw text the user typed into the chat box.

    Returns
    -------
    tuple
        ``(reply_text, intent_label, candidates)`` where ``intent_label`` is
        one of ``"chitchat"``, ``"answer"`` or ``"no_context"``, and
        ``candidates`` is the top re-ranked chunk list for ``"answer"``
        (``None`` otherwise).
    """
    init_session()
    chat_history = get_chat_context()
    append_debug(f"[pipeline] chat_history (summary+last3): {chat_history[:400]}")

    # 1) classify
    label, reason = classify_message(chat_history, user_message)
    append_debug(f"[pipeline] classifier -> {label} ({reason})")

    # 2) reformulate (handles follow-ups, abbreviations, corrections)
    rewritten, correction = reformulate_query(
        chat_history,
        user_message,
        st.session_state.last_suggested or "",
        label,
    )
    append_debug(f"[pipeline] reformulated: {rewritten} | correction: {correction}")

    # ---- CHITCHAT ----
    # The reformulator tags chitchat by suffixing the rewritten query.
    if rewritten.endswith("_chitchat"):
        reply = handle_chitchat(user_message, chat_history)
        update_chat_history(user_message, reply, "chitchat")
        return reply, "chitchat", None

    # ---- HYBRID RETRIEVAL ----
    candidates = hybrid_retrieve(rewritten)  # vector + bm25 + rerank
    append_debug(f"[pipeline] retrieved {len(candidates)} re-ranked candidates")

    judge = judge_sufficiency(rewritten, candidates)
    append_debug(
        f"[pipeline] judge selected {len(judge['answer_chunks'])} answer chunks, "
        f"{len(judge['followup_chunks'])} follow-up chunks"
    )

    # ---- JUDGE -> SYNTHESIZE ----
    if judge["answer_chunks"]:
        top4 = judge["answer_chunks"]
        followup_candidates = judge["followup_chunks"]

        # Seed the follow-up question from the best follow-up chunk:
        # prefer its section name, fall back to its raw text.
        followup_q = ""
        if followup_candidates:
            fc = followup_candidates[0]
            sec = fc["meta"].get("section") if fc.get("meta") else None
            followup_q = sec or fc["text"]

        answer = synthesize_answer(rewritten, top4, followup_q)

        # Prefix a terminology-correction notice. Skipped for follow-ups,
        # where the correction refers to an earlier turn, and when the
        # reformulator returned no corrections.
        if label != "FOLLOW_UP" and correction:
            correction_msg = "I guess you meant " + " and ".join(correction.values())
            answer = correction_msg + "\n" + answer

        update_chat_history(user_message, answer, "answer")
        return answer, "answer", candidates[:6]

    # Judge found no sufficient context: refuse rather than hallucinate.
    msg = (
        "I apologize, but I do not have sufficient information "
        "to answer this question accurately."
    )
    update_chat_history(user_message, msg, "no_context")
    return msg, "no_context", None
536549
537- # -------------------- STREAMLIT UI (example) --------------------
550+
551+ # -------------------- STREAMLIT UI --------------------
# ---- Streamlit page chrome: browser-tab title, layout, and app header ----
st.set_page_config(
    page_title="Medical Chatbot — Hybrid RAG",
    layout="centered",
)
st.title("🩺 Medical Chatbot — Hybrid (Vector + BM25 + Re-rank)")
540554
@@ -547,9 +561,8 @@ def medical_pipeline(user_message):
# When the chat box has a new message, run it through the pipeline while
# showing a spinner; history persistence happens inside medical_pipeline.
if user_input:
    with st.spinner("Thinking..."):
        reply, intent, candidates = medical_pipeline(user_input)
550- #st.session_state.history_pairs.append((user_input, reply, intent))
551564
552- # Render chat history (verbatim last 3 + UI )
565+ # Render chat history (verbatim last 3 + summary window handled internally )
553566for q , a , intent in reversed (st .session_state .history_pairs ):
554567 with st .chat_message ("user" ):
555568 st .markdown (q )
0 commit comments