diff --git a/gc.c b/gc.c
index 50868ea390b001..1f441b60e56f0c 100644
--- a/gc.c
+++ b/gc.c
@@ -2475,6 +2475,9 @@ rb_gc_before_updating_jit_code(void)
 #if USE_YJIT
     rb_yjit_mark_all_writeable();
 #endif
+#if USE_ZJIT
+    rb_zjit_mark_all_writable();
+#endif
 }
 
 /*
@@ -2488,6 +2491,9 @@ rb_gc_after_updating_jit_code(void)
 #if USE_YJIT
     rb_yjit_mark_all_executable();
 #endif
+#if USE_ZJIT
+    rb_zjit_mark_all_executable();
+#endif
 }
 
 static void
diff --git a/prism_compile.c b/prism_compile.c
index 85e8a2cdfa40d7..d30785bb883dc2 100644
--- a/prism_compile.c
+++ b/prism_compile.c
@@ -11362,12 +11362,6 @@ pm_parse_process(pm_parse_result_t *result, pm_node_t *node, VALUE *script_lines
     pm_intern_constants_ctx_t intern_ctx = { .constants = scope_node->constants, .encoding = scope_node->encoding, .index = 0 };
     pm_parser_constants_each(parser, pm_intern_constants_callback, &intern_ctx);
 
-    pm_constant_id_list_t *locals = &scope_node->locals;
-    pm_index_lookup_table_init_heap(&scope_node->index_lookup_table, (int) constants_size);
-    for (size_t index = 0; index < locals->size; index++) {
-        pm_index_lookup_table_insert(&scope_node->index_lookup_table, locals->ids[index], (int) index);
-    }
-
     // If we got here, this is a success and we can return Qnil to indicate that
     // no error should be raised.
     result->parsed = true;
diff --git a/zjit.h b/zjit.h
index e96caa257c6b91..d67c8b82f21efe 100644
--- a/zjit.h
+++ b/zjit.h
@@ -40,6 +40,8 @@ void rb_zjit_invalidate_no_ep_escape(const rb_iseq_t *iseq);
 void rb_zjit_constant_state_changed(ID id);
 void rb_zjit_iseq_mark(void *payload);
 void rb_zjit_iseq_update_references(void *payload);
+void rb_zjit_mark_all_writable(void);
+void rb_zjit_mark_all_executable(void);
 void rb_zjit_iseq_free(const rb_iseq_t *iseq);
 void rb_zjit_before_ractor_spawn(void);
 void rb_zjit_tracing_invalidate_all(void);
diff --git a/zjit/src/asm/mod.rs b/zjit/src/asm/mod.rs
index 1e8f3414ec4f81..6583476594a3bf 100644
--- a/zjit/src/asm/mod.rs
+++ b/zjit/src/asm/mod.rs
@@ -294,6 +294,12 @@ impl CodeBlock {
     }
 
+    /// Make all the code in the region writable. Call this before bulk writes
+    /// (e.g. GC reference updates).
+    pub fn mark_all_writable(&mut self) {
+        self.mem_block.borrow_mut().mark_all_writable();
+    }
+
     /// Make all the code in the region executable. Call this at the end of a write session.
     pub fn mark_all_executable(&mut self) {
         self.mem_block.borrow_mut().mark_all_executable();
     }
diff --git a/zjit/src/gc.rs b/zjit/src/gc.rs
index 239b71d5f48754..7f5bc7891f20cd 100644
--- a/zjit/src/gc.rs
+++ b/zjit/src/gc.rs
@@ -158,7 +158,9 @@ fn iseq_version_update_references(mut version: IseqVersionRef) {
         }
     }
 
-    // Move objects baked in JIT code
+    // Move objects baked in JIT code.
+    // The code region is already writable because rb_zjit_mark_all_writable() was called
+    // before the GC update_references phase. We write directly to avoid per-page mprotect calls.
     let cb = ZJITState::get_code_block();
     for &offset in unsafe { version.as_ref() }.gc_offsets.iter() {
         let value_ptr: *const u8 = offset.raw_ptr(cb);
@@ -170,13 +172,10 @@ fn iseq_version_update_references(mut version: IseqVersionRef) {
         let new_addr = unsafe { rb_gc_location(object) };
 
         // Only write when the VALUE moves, to be copy-on-write friendly.
         if new_addr != object {
-            for (byte_idx, &byte) in new_addr.as_u64().to_le_bytes().iter().enumerate() {
-                let byte_code_ptr = offset.add_bytes(byte_idx);
-                cb.write_mem(byte_code_ptr, byte).expect("patching existing code should be within bounds");
-            }
+            let value_ptr = value_ptr as *mut VALUE;
+            unsafe { value_ptr.write_unaligned(new_addr) };
         }
     }
 
-    cb.mark_all_executable();
 }
 
 /// Append a set of gc_offsets to the iseq's payload
@@ -211,6 +210,25 @@ fn ranges_overlap<T>(left: &Range<T>, right: &Range<T>) -> bool where T: PartialOrd {
     left.start < right.end && right.start < left.end
 }
 
+/// GC callback for making all JIT code writable before updating references in bulk.
+/// This avoids toggling W^X permissions per-page during GC compaction.
+#[unsafe(no_mangle)]
+pub extern "C" fn rb_zjit_mark_all_writable() {
+    if !ZJITState::has_instance() {
+        return;
+    }
+    ZJITState::get_code_block().mark_all_writable();
+}
+
+/// GC callback for making all JIT code executable after updating references in bulk.
+#[unsafe(no_mangle)]
+pub extern "C" fn rb_zjit_mark_all_executable() {
+    if !ZJITState::has_instance() {
+        return;
+    }
+    ZJITState::get_code_block().mark_all_executable();
+}
+
 /// Callback for marking GC objects inside [crate::invariants::Invariants].
 #[unsafe(no_mangle)]
 pub extern "C" fn rb_zjit_root_mark() {
diff --git a/zjit/src/virtualmem.rs b/zjit/src/virtualmem.rs
index 9741a7b13867d5..c2a1e13a5dee83 100644
--- a/zjit/src/virtualmem.rs
+++ b/zjit/src/virtualmem.rs
@@ -265,6 +265,22 @@ impl VirtualMemory {
         memory_usage_bytes + self.page_size_bytes < memory_limit_bytes
     }
 
+    /// Make all the code in the region writable. Call this before bulk writes (e.g. GC
+    /// reference updates). See [Self] for usual usage flow.
+    pub fn mark_all_writable(&mut self) {
+        self.current_write_page = None;
+
+        let region_start = self.region_start;
+        let mapped_region_bytes: u32 = self.mapped_region_bytes.try_into().unwrap();
+
+        // Make mapped region writable
+        if mapped_region_bytes > 0 {
+            if !self.allocator.mark_writable(region_start.as_ptr(), mapped_region_bytes) {
+                panic!("Cannot make JIT memory region writable");
+            }
+        }
+    }
+
     /// Make all the code in the region executable. Call this at the end of a write session.
     /// See [Self] for usual usage flow.
     pub fn mark_all_executable(&mut self) {