56 changes: 56 additions & 0 deletions ext/coverage/coverage.c
@@ -600,6 +600,62 @@ rb_coverage_running(VALUE klass)
* 5. The ending line number the method appears on in the file.
* 6. The ending column number the method appears on in the file.
*
* == Eval \Coverage
*
* Eval coverage can be combined with any of the coverage types above to
* track coverage of code executed via eval.
*
*     require "coverage"
*     Coverage.start(eval: true, lines: true)
*
*     eval(<<~RUBY, nil, "eval 1")
*       ary = []
*       10.times do |i|
*         ary << "hello" * i
*       end
*     RUBY
*
*     Coverage.result # => {"eval 1" => {lines: [1, 1, 10, nil]}}
*
* Note that the eval must have a filename assigned; otherwise, coverage
* will not be measured.
*
*     require "coverage"
*     Coverage.start(eval: true, lines: true)
*
*     eval(<<~RUBY)
*       ary = []
*       10.times do |i|
*         ary << "hello" * i
*       end
*     RUBY
*
*     Coverage.result # => {"(eval)" => {lines: [nil, nil, nil, nil]}}
*
* Also note that if the eval is assigned a line number other than 1, the
* resulting line coverage is padded with leading +nil+ entries when the
* line number is greater than 1, and truncated when it is less than 1.
*
*     require "coverage"
*     Coverage.start(eval: true, lines: true)
*
*     eval(<<~RUBY, nil, "eval 1", 3)
*       ary = []
*       10.times do |i|
*         ary << "hello" * i
*       end
*     RUBY
*
*     eval(<<~RUBY, nil, "eval 2", -1)
*       ary = []
*       10.times do |i|
*         ary << "hello" * i
*       end
*     RUBY
*
*     Coverage.result
*     # => {"eval 1" => {lines: [nil, nil, 1, 1, 10, nil]}, "eval 2" => {lines: [10, nil]}}
*
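* Eval coverage also composes with method coverage. Here is a minimal
* sketch (the method +hello+ and the exact result tuple are illustrative;
* the tuple follows the method coverage format described above):
*
*     require "coverage"
*     Coverage.start(eval: true, methods: true)
*
*     eval(<<~RUBY, nil, "eval 1")
*       def hello
*         "hello"
*       end
*     RUBY
*
*     hello
*
*     Coverage.result
*     # => {"eval 1" => {methods: {[Object, :hello, 1, 0, 3, 3] => 1}}}
*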
* == All \Coverage Modes
*
* You can also run all modes of coverage simultaneously with this shortcut.
36 changes: 36 additions & 0 deletions test/ruby/test_zjit.rb
@@ -470,6 +470,42 @@ def test(&block)
}, insns: [:getblockparamproxy]
end

def test_getblockparam
assert_compiles '2', %q{
def test(&blk)
blk
end
test { 2 }.call
test { 2 }.call
}, insns: [:getblockparam]
end

def test_getblockparam_proxy_side_exit_restores_block_local
assert_compiles '2', %q{
def test(&block)
b = block
# side-exits here
raise "test" unless block
b ? 2 : 3
end
test {}
test {}
}, insns: [:getblockparam, :getblockparamproxy]
end

def test_getblockparam_used_twice_in_args
assert_compiles '1', %q{
def f(*args) = args
def test(&blk)
b = blk
f(*[1], blk)
blk
end
test {1}.call
test {1}.call
}, insns: [:getblockparam]
end

def test_optimized_method_call_proc_call
assert_compiles '2', %q{
p = proc { |x| x * 2 }
114 changes: 78 additions & 36 deletions zjit/src/backend/arm64/mod.rs
@@ -694,7 +694,8 @@ impl Assembler {
/// VRegs, most splits should happen in [`Self::arm64_split`]. However, some instructions
/// need to be split with real registers after `alloc_regs`, e.g. for `compile_exits`, so
/// this pass splits them using scratch registers.
fn arm64_scratch_split(mut self) -> Assembler {
/// Linearizes all blocks into a single giant block.
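/// For example (sketch): a CFG like `bb0 -> {bb1, bb2}` is appended into a
/// single instruction stream, with block entries becoming labels and branch
/// parameters expanded into `ParallelMov`s before each jump.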
fn arm64_scratch_split(self) -> Assembler {
/// If opnd is Opnd::Mem with a too large disp, make the disp smaller using lea.
fn split_large_disp(asm: &mut Assembler, opnd: Opnd, scratch_opnd: Opnd) -> Opnd {
match opnd {
Expand Down Expand Up @@ -750,12 +751,23 @@ impl Assembler {
// Prepare StackState to lower MemBase::Stack
let stack_state = StackState::new(self.stack_base_idx);

let mut asm_local = Assembler::new_with_asm(&self);
let mut asm_local = Assembler::new();
asm_local.accept_scratch_reg = true;
asm_local.stack_base_idx = self.stack_base_idx;
asm_local.label_names = self.label_names.clone();
asm_local.live_ranges.resize(self.live_ranges.len(), LiveRange { start: None, end: None });

// Create one giant block to linearize everything into
asm_local.new_block_without_id();

let asm = &mut asm_local;
asm.accept_scratch_reg = true;
let iterator = &mut self.instruction_iterator();

while let Some((_, mut insn)) = iterator.next(asm) {
// Get linearized instructions with branch parameters expanded into ParallelMov
let linearized_insns = self.linearize_instructions();

// Process each linearized instruction
for (idx, insn) in linearized_insns.iter().enumerate() {
let mut insn = insn.clone();
match &mut insn {
Insn::Add { left, right, out } |
Insn::Sub { left, right, out } |
Expand Down Expand Up @@ -795,7 +807,7 @@ impl Assembler {
};

// If the next instruction is JoMul
if matches!(iterator.peek(), Some((_, Insn::JoMul(_)))) {
if idx + 1 < linearized_insns.len() && matches!(linearized_insns[idx + 1], Insn::JoMul(_)) {
// Produce a register that is all zeros or all ones
// Based on the sign bit of the 64-bit mul result
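// (A signed 64-bit multiply overflows iff the high 64 bits, which
// arm64_emit computes with smulh, differ from this sign-extension
// of the low 64 bits.)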
asm.push_insn(Insn::RShift { out: SCRATCH0_OPND, opnd: reg_out, shift: Opnd::UImm(63) });
Expand Down Expand Up @@ -940,7 +952,7 @@ impl Assembler {

/// Emit a conditional jump instruction to a specific target. This is
/// called when lowering any of the conditional jump instructions.
fn emit_conditional_jump<const CONDITION: u8>(cb: &mut CodeBlock, target: Target) {
fn emit_conditional_jump<const CONDITION: u8>(asm: &Assembler, cb: &mut CodeBlock, target: Target) {
fn generate_branch<const CONDITION: u8>(cb: &mut CodeBlock, src_addr: i64, dst_addr: i64) {
let num_insns = if bcond_offset_fits_bits((dst_addr - src_addr) / 4) {
// If the jump offset fits into the conditional jump as
Expand Down Expand Up @@ -991,30 +1003,31 @@ impl Assembler {
(num_insns..cb.conditional_jump_insns()).for_each(|_| nop(cb));
}

match target {
let label = match target {
Target::CodePtr(dst_ptr) => {
let dst_addr = dst_ptr.as_offset();
let src_addr = cb.get_write_ptr().as_offset();
generate_branch::<CONDITION>(cb, src_addr, dst_addr);
return;
},
Target::Label(label_idx) => {
// Try to use a single B.cond instruction
cb.label_ref(label_idx, 4, |cb, src_addr, dst_addr| {
// +1 since src_addr is after the instruction while A64
// counts the offset relative to the start.
let offset = (dst_addr - src_addr) / 4 + 1;
if bcond_offset_fits_bits(offset) {
bcond(cb, CONDITION, InstructionOffset::from_insns(offset as i32));
Ok(())
} else {
Err(())
}
});
},
Target::Label(l) => l,
Target::Block(ref edge) => asm.block_label(edge.target),
Target::SideExit { .. } => {
unreachable!("Target::SideExit should have been compiled by compile_exits")
},
};
// Try to use a single B.cond instruction
cb.label_ref(label, 4, |cb, src_addr, dst_addr| {
// +1 since src_addr is after the instruction while A64
// counts the offset relative to the start.
let offset = (dst_addr - src_addr) / 4 + 1;
if bcond_offset_fits_bits(offset) {
bcond(cb, CONDITION, InstructionOffset::from_insns(offset as i32));
Ok(())
} else {
Err(())
}
});
}

/// Emit a CBZ or CBNZ which branches when a register is zero or non-zero
Expand Down Expand Up @@ -1117,8 +1130,13 @@ impl Assembler {
let (_hook, mut hook_insn_idx) = AssemblerPanicHook::new(self, 0);

// For each instruction
// NOTE: At this point, the assembler should have been linearized into a single giant block
// by either resolve_parallel_mov_pass() or arm64_scratch_split().
let mut insn_idx: usize = 0;
while let Some(insn) = self.insns.get(insn_idx) {
assert_eq!(self.basic_blocks.len(), 1, "Assembler should be linearized into a single block before arm64_emit");
let insns = &self.basic_blocks[0].insns;

while let Some(insn) = insns.get(insn_idx) {
// Update insn_idx that is shown on panic
hook_insn_idx.as_mut().map(|idx| idx.lock().map(|mut idx| *idx = insn_idx).unwrap());

Expand Down Expand Up @@ -1222,7 +1240,7 @@ impl Assembler {
},
Insn::Mul { left, right, out } => {
// If the next instruction is JoMul with RShift created by arm64_scratch_split
match (self.insns.get(insn_idx + 1), self.insns.get(insn_idx + 2)) {
match (insns.get(insn_idx + 1), insns.get(insn_idx + 2)) {
(Some(Insn::RShift { out: out_sign, opnd: out_opnd, shift: out_shift }), Some(Insn::JoMul(_))) => {
// Compute the high 64 bits
smulh(cb, Self::EMIT_OPND, left.into(), right.into());
Expand Down Expand Up @@ -1487,34 +1505,48 @@ impl Assembler {
}
});
},
Target::Block(ref edge) => {
let label = self.block_label(edge.target);
cb.label_ref(label, 4, |cb, src_addr, dst_addr| {
// +1 since src_addr is after the instruction while A64
// counts the offset relative to the start.
let offset = (dst_addr - src_addr) / 4 + 1;
if b_offset_fits_bits(offset) {
b(cb, InstructionOffset::from_insns(offset as i32));
Ok(())
} else {
Err(())
}
});
},
Target::SideExit { .. } => {
unreachable!("Target::SideExit should have been compiled by compile_exits")
},
};
},
Insn::Je(target) | Insn::Jz(target) => {
emit_conditional_jump::<{Condition::EQ}>(cb, target.clone());
emit_conditional_jump::<{Condition::EQ}>(self, cb, target.clone());
},
Insn::Jne(target) | Insn::Jnz(target) | Insn::JoMul(target) => {
emit_conditional_jump::<{Condition::NE}>(cb, target.clone());
emit_conditional_jump::<{Condition::NE}>(self, cb, target.clone());
},
Insn::Jl(target) => {
emit_conditional_jump::<{Condition::LT}>(cb, target.clone());
emit_conditional_jump::<{Condition::LT}>(self, cb, target.clone());
},
Insn::Jg(target) => {
emit_conditional_jump::<{Condition::GT}>(cb, target.clone());
emit_conditional_jump::<{Condition::GT}>(self, cb, target.clone());
},
Insn::Jge(target) => {
emit_conditional_jump::<{Condition::GE}>(cb, target.clone());
emit_conditional_jump::<{Condition::GE}>(self, cb, target.clone());
},
Insn::Jbe(target) => {
emit_conditional_jump::<{Condition::LS}>(cb, target.clone());
emit_conditional_jump::<{Condition::LS}>(self, cb, target.clone());
},
Insn::Jb(target) => {
emit_conditional_jump::<{Condition::CC}>(cb, target.clone());
emit_conditional_jump::<{Condition::CC}>(self, cb, target.clone());
},
Insn::Jo(target) => {
emit_conditional_jump::<{Condition::VS}>(cb, target.clone());
emit_conditional_jump::<{Condition::VS}>(self, cb, target.clone());
},
Insn::Joz(opnd, target) => {
emit_cmp_zero_jump(cb, opnd.into(), true, target.clone());
@@ -1537,8 +1569,8 @@ impl Assembler {
let Some(Insn::Cmp {
left: status_reg @ Opnd::Reg(_),
right: Opnd::UImm(_) | Opnd::Imm(_),
}) = self.insns.get(insn_idx + 1) else {
panic!("arm64_scratch_split should add Cmp after IncrCounter: {:?}", self.insns.get(insn_idx + 1));
}) = insns.get(insn_idx + 1) else {
panic!("arm64_scratch_split should add Cmp after IncrCounter: {:?}", insns.get(insn_idx + 1));
};

// Attempt to increment a counter
Expand Down Expand Up @@ -1587,7 +1619,7 @@ impl Assembler {
} else {
// No bytes dropped, so the pos markers point to valid code
for (insn_idx, pos) in pos_markers {
if let Insn::PosMarker(callback) = self.insns.get(insn_idx).unwrap() {
if let Insn::PosMarker(callback) = insns.get(insn_idx).unwrap() {
callback(pos, cb);
} else {
panic!("non-PosMarker in pos_markers insn_idx={insn_idx} {self:?}");
Expand Down Expand Up @@ -1617,6 +1649,10 @@ impl Assembler {
if use_scratch_reg {
asm = asm.arm64_scratch_split();
asm_dump!(asm, scratch_split);
} else {
// For trampolines that use the scratch registers themselves, resolve ParallelMov without scratch_reg.
asm = asm.resolve_parallel_mov_pass();
asm_dump!(asm, resolve_parallel_mov);
}

// Create label instances in the code block
Expand Down Expand Up @@ -1681,19 +1717,23 @@ mod tests {

use super::*;
use insta::assert_snapshot;
use crate::hir;

static TEMP_REGS: [Reg; 5] = [X1_REG, X9_REG, X10_REG, X14_REG, X15_REG];

fn setup_asm() -> (Assembler, CodeBlock) {
crate::options::rb_zjit_prepare_options(); // Allow `get_option!` in Assembler
(Assembler::new(), CodeBlock::new_dummy())
let mut asm = Assembler::new();
asm.new_block_without_id();
(asm, CodeBlock::new_dummy())
}

#[test]
fn test_lir_string() {
use crate::hir::SideExitReason;

let mut asm = Assembler::new();
asm.new_block_without_id();
asm.stack_base_idx = 1;

let label = asm.new_label("bb0");
Expand Down Expand Up @@ -2107,6 +2147,7 @@ mod tests {
#[test]
fn test_store_with_valid_scratch_reg() {
let (mut asm, scratch_reg) = Assembler::new_with_scratch_reg();
asm.new_block_without_id();
let mut cb = CodeBlock::new_dummy();
asm.store(Opnd::mem(64, scratch_reg, 0), 0x83902.into());

Expand Down Expand Up @@ -2560,6 +2601,7 @@ mod tests {

crate::options::rb_zjit_prepare_options(); // Allow `get_option!` in Assembler
let mut asm = Assembler::new();
asm.new_block_without_id();
let mut cb = CodeBlock::new_dummy_sized(memory_required);

let far_label = asm.new_label("far");