Skip to content

Commit 55ff9bd

Browse files
committed
fix: concurrent isolate interleaving + warm reuse test
Add test_concurrent_pinned_interleaving that reproduces the "PinnedRef and Context do not belong to the same Isolate" panic when two execute_pinned tasks interleave on the same thread. Fix incorrect comments about Locker safety in worker_pool.rs. Bump runtime-v8 to v0.13.5 (IsolateGuard fix). Bump to 0.13.8.
1 parent aa8af15 commit 55ff9bd

4 files changed

Lines changed: 73 additions & 8 deletions

File tree

Cargo.lock

Lines changed: 4 additions & 4 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

Cargo.toml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[package]
22
name = "openworkers-runner"
3-
version = "0.13.7"
3+
version = "0.13.8"
44
edition = "2024"
55
license = "MIT"
66
default-run = "openworkers-runner"
@@ -62,7 +62,7 @@ openworkers-core = { git = "https://github.com/openworkers/openworkers-core", ta
6262
openworkers-transform = { git = "https://github.com/openworkers/openworkers-transform", tag = "v0.1.0" }
6363

6464
# Runtime backend (v8 only for now, others require older version of core)
65-
openworkers-runtime-v8 = { git = "https://github.com/openworkers/openworkers-runtime-v8", tag = "v0.13.4", optional = true, features = ["ptrcomp"] }
65+
openworkers-runtime-v8 = { git = "https://github.com/openworkers/openworkers-runtime-v8", tag = "v0.13.5", optional = true, features = ["ptrcomp"] }
6666

6767
# WASM runtime (optional)
6868
# openworkers-runtime-wasm = { path = "../openworkers-runtime-wasm", optional = true }

src/worker_pool.rs

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -73,8 +73,11 @@ impl SequentialWorkerPool {
7373

7474
// Persistent LocalSet enables interleaved execution:
7575
// while one task awaits I/O, the LocalSet polls other tasks.
76-
// Each task holds its own V8 locker on its own isolate,
77-
// so there's no V8 contention — just cooperative scheduling.
76+
// Each task holds its own V8 locker on its own isolate.
77+
// NOTE: V8 Lockers call Isolate::Enter() which sets the
78+
// thread-local "current isolate". With cooperative scheduling,
79+
// multiple Lockers coexist and IsolateGuard (enter/exit per
80+
// V8 work block) ensures GetCurrent() returns the correct isolate.
7881
rt.block_on(async {
7982
let local = tokio::task::LocalSet::new();
8083

tests/warm_reuse_test.rs

Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -330,3 +330,65 @@ async fn test_warm_reuse_after_rejected_waituntil() {
330330
})
331331
.await;
332332
}
333+
334+
// =============================================================================
335+
// Concurrent interleaving test (V8 Locker + spawn_local)
336+
// =============================================================================
337+
338+
/// Two concurrent execute_pinned calls with async yields on the same thread.
339+
///
340+
/// This reproduces the V8 isolate interleaving bug:
341+
/// 1. Task A creates Locker(X), enters isolate X → GetCurrent() = X
342+
/// 2. Task A's worker does setTimeout(50ms) → event loop returns Pending
343+
/// 3. LocalSet polls Task B → Locker(Y) enters isolate Y → GetCurrent() = Y
344+
/// 4. Task B's worker also yields (setTimeout)
345+
/// 5. Task A resumes, creates HandleScope from X's raw pointer
346+
/// 6. ContextScope::new checks: X's pointer != GetCurrent() (Y) → PANIC
347+
///
348+
/// The panic: "PinnedRef and Context do not belong to the same Isolate"
349+
///
350+
/// This happens because v8::Locker calls Isolate::Enter() which sets the
351+
/// thread-local "current isolate". With cooperative scheduling (spawn_local),
352+
/// multiple Lockers coexist on the same thread with non-LIFO ordering.
353+
#[tokio::test]
354+
async fn test_concurrent_pinned_interleaving() {
355+
init_pool();
356+
357+
run_local(|| async {
358+
let script = Script::new(
359+
r#"
360+
addEventListener('fetch', (event) => {
361+
// Force async yield: respond after a delay.
362+
// During this delay, the event loop returns Pending,
363+
// allowing the LocalSet to poll another task.
364+
setTimeout(() => {
365+
event.respondWith(new Response('ok'));
366+
}, 50);
367+
});
368+
"#,
369+
);
370+
371+
// Spawn two concurrent tasks on the same LocalSet (same OS thread).
372+
// They get DIFFERENT isolates because they have different worker_ids.
373+
let handle_a = tokio::task::spawn_local({
374+
let script = script.clone();
375+
async move { pinned_fetch("interleave-a", 1, script).await }
376+
});
377+
378+
let handle_b = tokio::task::spawn_local({
379+
let script = script.clone();
380+
async move { pinned_fetch("interleave-b", 1, script).await }
381+
});
382+
383+
let (result_a, result_b) = tokio::join!(handle_a, handle_b);
384+
385+
let (status_a, body_a) = result_a.expect("Task A should not panic");
386+
let (status_b, body_b) = result_b.expect("Task B should not panic");
387+
388+
assert_eq!(status_a, 200);
389+
assert_eq!(body_a, "ok");
390+
assert_eq!(status_b, 200);
391+
assert_eq!(body_b, "ok");
392+
})
393+
.await;
394+
}

0 commit comments

Comments (0)