|
1 | 1 | use actix_web::{get, web, HttpResponse, Responder}; |
2 | 2 | use actix_web_lab::sse::{self, Sse}; |
| 3 | +use futures::StreamExt; |
3 | 4 | use sqlx::PgPool; |
4 | | -use std::sync::Arc; |
5 | | -use tokio::sync::broadcast; |
6 | 5 | use uuid::Uuid; |
7 | 6 |
|
8 | 7 | use crate::db::{get_logs, LogEntry}; |
9 | 8 |
|
/// Shared application state injected into every handler via `web::Data`.
pub struct AppState {
    /// Postgres connection pool used to read persisted log entries.
    pub pool: PgPool,
    /// NATS client used to subscribe to live per-worker log subjects.
    pub nats_client: async_nats::Client,
}
14 | 13 |
|
15 | | -#[get("/health")] |
/// Raw log-event payload assembled from a live NATS message, used to build
/// an SSE "log" event with the same JSON shape as a persisted `LogEntry`.
struct LogEventData {
    /// Event timestamp in milliseconds since the Unix epoch.
    date: i64,
    /// Log level string parsed from the NATS subject (e.g. "info").
    level: String,
    /// Log message body (UTF-8 payload of the NATS message).
    message: String,
}
| 20 | + |
| 21 | +impl Into<sse::Data> for LogEntry { |
| 22 | + fn into(self) -> sse::Data { |
| 23 | + sse::Data::new_json(serde_json::json!({ |
| 24 | + "date": self.date.timestamp_millis(), |
| 25 | + "level": format!("{:?}", self.level).to_lowercase(), |
| 26 | + "message": self.message |
| 27 | + })) |
| 28 | + .unwrap() |
| 29 | + .event("log") |
| 30 | + } |
| 31 | +} |
| 32 | + |
| 33 | +impl Into<sse::Data> for LogEventData { |
| 34 | + fn into(self) -> sse::Data { |
| 35 | + sse::Data::new_json(serde_json::json!({ |
| 36 | + "date": self.date, |
| 37 | + "level": self.level, |
| 38 | + "message": self.message |
| 39 | + })) |
| 40 | + .unwrap() |
| 41 | + .event("log") |
| 42 | + } |
| 43 | +} |
| 44 | + |
| 45 | +#[get("/api/v1/health")] |
16 | 46 | pub async fn health() -> impl Responder { |
17 | 47 | HttpResponse::Ok().json(serde_json::json!({ |
18 | 48 | "status": "ok", |
19 | 49 | "service": "openworkers-logs" |
20 | 50 | })) |
21 | 51 | } |
22 | 52 |
|
23 | | -#[get("/logs/{worker_id}")] |
24 | | -pub async fn get_worker_logs( |
| 53 | +#[get("/api/v1/workers/{worker_id}/logs")] |
| 54 | +pub async fn stream_worker_logs( |
25 | 55 | worker_id: web::Path<Uuid>, |
26 | 56 | data: web::Data<AppState>, |
27 | 57 | ) -> impl Responder { |
28 | | - match get_logs(&data.pool, *worker_id, 100).await { |
29 | | - Ok(logs) => HttpResponse::Ok().json(logs), |
| 58 | + let worker_id = *worker_id; |
| 59 | + |
| 60 | + // Fetch last 10 logs from database |
| 61 | + let historical_logs = match get_logs(&data.pool, worker_id, 10).await { |
| 62 | + Ok(logs) => logs, |
30 | 63 | Err(e) => { |
31 | | - log::error!("Failed to fetch logs: {:?}", e); |
32 | | - HttpResponse::InternalServerError().json(serde_json::json!({ |
33 | | - "error": "Failed to fetch logs" |
34 | | - })) |
| 64 | + log::error!("Failed to fetch historical logs: {:?}", e); |
| 65 | + vec![] |
35 | 66 | } |
| 67 | + }; |
| 68 | + |
| 69 | + // Subscribe to NATS for this specific worker's logs |
| 70 | + let subject = format!("{}.console.>", worker_id); |
| 71 | + let nats_sub = data.nats_client.subscribe(subject).await.ok(); |
| 72 | + |
| 73 | + if nats_sub.is_none() { |
| 74 | + log::error!("Failed to subscribe to NATS"); |
36 | 75 | } |
37 | | -} |
38 | 76 |
|
39 | | -#[get("/logs/{worker_id}/stream")] |
40 | | -pub async fn stream_worker_logs( |
41 | | - worker_id: web::Path<Uuid>, |
42 | | - data: web::Data<AppState>, |
43 | | -) -> impl Responder { |
44 | | - let worker_id = *worker_id; |
45 | | - let mut rx = data.broadcaster.subscribe(); |
| 77 | + let mut nats_sub = nats_sub.unwrap(); |
| 78 | + |
| 79 | + let mut id_counter: u64 = 0; |
46 | 80 |
|
47 | 81 | let stream = async_stream::stream! { |
48 | | - while let Ok(log) = rx.recv().await { |
49 | | - // Only send logs for this worker |
50 | | - if log.worker_id == worker_id { |
51 | | - let event = sse::Event::Data( |
52 | | - sse::Data::new_json(serde_json::json!({ |
53 | | - "date": log.date, |
54 | | - "message": log.message, |
55 | | - "level": log.level |
56 | | - })) |
57 | | - .unwrap() |
58 | | - ); |
59 | | - yield Ok::<_, actix_web::Error>(event); |
60 | | - } |
| 82 | + // First, yield historical logs in reverse order (oldest first) |
| 83 | + for log in historical_logs.into_iter().rev() { |
| 84 | + let mut data: sse::Data = log.into(); |
| 85 | + data.set_id(format!("{}", id_counter)); |
| 86 | + id_counter += 1; |
| 87 | + yield Ok::<_, actix_web::Error>(sse::Event::Data(data)); |
| 88 | + } |
| 89 | + |
| 90 | + // Then stream new logs in real-time from NATS |
| 91 | + while let Some(msg) = nats_sub.next().await { |
| 92 | + // Parse level from subject: {worker_id}.console.{level} |
| 93 | + let level_str = msg.subject.split('.').nth(2).unwrap_or("info"); |
| 94 | + |
| 95 | + let message = match String::from_utf8(msg.payload.to_vec()) { |
| 96 | + Ok(m) => m, |
| 97 | + Err(_) => continue, |
| 98 | + }; |
| 99 | + |
| 100 | + let data = LogEventData { |
| 101 | + date: chrono::Utc::now().timestamp_millis(), |
| 102 | + level: level_str.to_string(), |
| 103 | + message, |
| 104 | + }; |
| 105 | + |
| 106 | + let mut data: sse::Data = data.into(); |
| 107 | + data.set_id(format!("{}", id_counter)); |
| 108 | + id_counter += 1; |
| 109 | + |
| 110 | + yield Ok::<_, actix_web::Error>(sse::Event::Data(data)); |
61 | 111 | } |
62 | 112 | }; |
63 | 113 |
|
|
0 commit comments