22#![ allow( unused_variables) ]
33#![ allow( unused_imports) ]
44
5+ use crate :: asm:: x86_64:: jmp_ptr;
56use crate :: asm:: { CodeBlock } ;
67use crate :: asm:: arm64:: * ;
78use crate :: codegen:: { JITState , CodegenGlobals } ;
@@ -38,8 +39,25 @@ pub const _C_RET_OPND: Opnd = Opnd::Reg(X0_REG);
// Register used as the C stack pointer (X31 encodes SP on AArch64).
pub const C_SP_REG: A64Opnd = X31;

// Step size in bytes for moving the C stack pointer.
// NOTE(review): 16 presumably keeps SP at the 16-byte alignment AArch64
// requires for SP-relative accesses — confirm against the platform ABI.
pub const C_SP_STEP: i32 = 16;
4041
41- // The number of bytes that are generated by emit_jmp_ptr
42- pub const JMP_PTR_BYTES : usize = 20 ;
42+ impl CodeBlock {
43+ // The maximum number of bytes that can be generated by emit_jmp_ptr.
44+ pub fn jmp_ptr_bytes ( & self ) -> usize {
45+ // b instruction's offset is encoded as imm26 times 4. It can jump to
46+ // +/-128MiB, so this can be used when --yjit-exec-mem-size <= 128.
47+ let num_insns = if b_offset_fits_bits ( self . virtual_region_size ( ) as i64 / 4 ) {
48+ 1 // b instruction
49+ } else {
50+ 5 // 4 instructions to load a 64-bit absolute address + br instruction
51+ } ;
52+ num_insns * 4
53+ }
54+
55+ // The maximum number of instructions that can be generated by emit_conditional_jump.
56+ fn conditional_jump_insns ( & self ) -> i32 {
57+ // The worst case is instructions for a jump + bcond.
58+ self . jmp_ptr_bytes ( ) as i32 / 4 + 1
59+ }
60+ }
4361
4462/// Map Opnd to A64Opnd
4563impl From < Opnd > for A64Opnd {
@@ -110,7 +128,8 @@ fn emit_jmp_ptr(cb: &mut CodeBlock, dst_ptr: CodePtr, padding: bool) {
110128 // Make sure it's always a consistent number of
111129 // instructions in case it gets patched and has to
112130 // use the other branch.
113- for _ in num_insns..( JMP_PTR_BYTES / 4 ) {
131+ assert ! ( num_insns * 4 <= cb. jmp_ptr_bytes( ) ) ;
132+ for _ in num_insns..( cb. jmp_ptr_bytes ( ) / 4 ) {
114133 nop ( cb) ;
115134 }
116135 }
@@ -697,6 +716,18 @@ impl Assembler
697716 // Here we're going to return 1 because we've only
698717 // written out 1 instruction.
699718 1
719+ } else if b_offset_fits_bits ( ( dst_addr - ( src_addr + 4 ) ) / 4 ) { // + 4 for bcond
720+ // If the jump offset fits into the unconditional jump as
721+ // an immediate value, we can use inverse b.cond + b.
722+ //
723+ // We're going to write out the inverse condition so
724+ // that if it doesn't match it will skip over the
725+ // instruction used for branching.
726+ bcond ( cb, Condition :: inverse ( CONDITION ) , 2 . into ( ) ) ;
727+ b ( cb, InstructionOffset :: from_bytes ( ( dst_addr - ( src_addr + 4 ) ) as i32 ) ) ; // + 4 for bcond
728+
729+ // We've only written out 2 instructions.
730+ 2
700731 } else {
701732 // Otherwise, we need to load the address into a
702733 // register and use the branch register instruction.
@@ -720,7 +751,8 @@ impl Assembler
720751 // We need to make sure we have at least 6 instructions for
721752 // every kind of jump for invalidation purposes, so we're
722753 // going to write out padding nop instructions here.
723- for _ in num_insns..6 { nop ( cb) ; }
754+ assert ! ( num_insns <= cb. conditional_jump_insns( ) ) ;
755+ for _ in num_insns..cb. conditional_jump_insns ( ) { nop ( cb) ; }
724756 }
725757 } ,
726758 Target :: Label ( label_idx) => {
@@ -1063,7 +1095,7 @@ impl Assembler
10631095 Insn :: RegTemps ( _) |
10641096 Insn :: SpillTemp ( _) => ( ) , // just a reg alloc signal, no code
10651097 Insn :: PadInvalPatch => {
1066- while ( cb. get_write_pos ( ) . saturating_sub ( std:: cmp:: max ( start_write_pos, cb. page_start_pos ( ) ) ) ) < JMP_PTR_BYTES && !cb. has_dropped_bytes ( ) {
1098+ while ( cb. get_write_pos ( ) . saturating_sub ( std:: cmp:: max ( start_write_pos, cb. page_start_pos ( ) ) ) ) < cb . jmp_ptr_bytes ( ) && !cb. has_dropped_bytes ( ) {
10671099 nop ( cb) ;
10681100 }
10691101 }
0 commit comments