/*
 * This file was initially derived from the files
 * `js/src/jit/BacktrackingAllocator.h` and
 * `js/src/jit/BacktrackingAllocator.cpp` in Mozilla Firefox, and was
 * originally licensed under the Mozilla Public License 2.0. We
 * subsequently relicensed it to Apache-2.0 WITH LLVM-exception (see
 * https://github.com/bytecodealliance/regalloc2/issues/7).
 *
 * Since the initial port, the design has been substantially evolved
 * and optimized.
 */

//! Spillslot allocation.

use super::{
    AllocRegResult, Env, LiveRangeKey, PReg, PRegIndex, RegTraversalIter, SpillSetIndex,
    SpillSlotData, SpillSlotIndex,
};
use crate::{Allocation, Function, SpillSlot};

impl<'a, F: Function> Env<'a, F> {
    /// Try to allocate a physical register to each spilled bundle; any
    /// bundle that still cannot get a register marks its spillset as
    /// requiring an actual spillslot.
    pub fn try_allocating_regs_for_spilled_bundles(&mut self) {
        trace!("allocating regs for spilled bundles");
        let mut scratch = core::mem::take(&mut self.ctx.scratch_conflicts);
        for i in 0..self.ctx.spilled_bundles.len() {
            let bundle = self.ctx.spilled_bundles[i]; // don't borrow self

            if self.ctx.bundles[bundle].ranges.is_empty() {
                continue;
            }

            let class = self.ctx.spillsets[self.ctx.bundles[bundle].spillset].class;
            let hint = self.ctx.spillsets[self.ctx.bundles[bundle].spillset].reg_hint;

            // This may be an empty-range bundle whose ranges are not
            // sorted; sort all range-lists again here.
            self.ctx.bundles[bundle]
                .ranges
                .sort_unstable_by_key(|entry| entry.range.from);

            let mut success = false;
            self.ctx.output.stats.spill_bundle_reg_probes += 1;
            for preg in
                RegTraversalIter::new(self.env, class, hint, PReg::invalid(), bundle.index(), None)
            {
                trace!("trying bundle {:?} to preg {:?}", bundle, preg);
                let preg_idx = PRegIndex::new(preg.index());
                if let AllocRegResult::Allocated(_) =
                    self.try_to_allocate_bundle_to_reg(bundle, preg_idx, None, &mut scratch)
                {
                    self.ctx.output.stats.spill_bundle_reg_success += 1;
                    success = true;
                    break;
                }
            }
            if !success {
                trace!(
                    "spilling bundle {:?}: marking spillset {:?} as required",
                    bundle,
                    self.ctx.bundles[bundle].spillset
                );
                self.ctx.spillsets[self.ctx.bundles[bundle].spillset].required = true;
            }
        }
        self.ctx.scratch_conflicts = scratch;
    }

    /// Returns `true` if `spillset`'s overall range does not overlap
    /// any range already assigned to `spillslot`.
    pub fn spillslot_can_fit_spillset(
        &mut self,
        spillslot: SpillSlotIndex,
        spillset: SpillSetIndex,
    ) -> bool {
        !self.ctx.spillslots[spillslot.index()]
            .ranges
            .btree
            .contains_key(&LiveRangeKey::from_range(
                &self.ctx.spillsets[spillset].range,
            ))
    }

    /// Record the assignment of `spillset` to `spillslot`, inserting
    /// the spillset's range into the slot's range map.
    pub fn allocate_spillset_to_spillslot(
        &mut self,
        spillset: SpillSetIndex,
        spillslot: SpillSlotIndex,
    ) {
        self.ctx.spillsets[spillset].slot = spillslot;

        let res = self.ctx.spillslots[spillslot.index()].ranges.btree.insert(
            LiveRangeKey::from_range(&self.ctx.spillsets[spillset].range),
            spillset,
        );

        debug_assert!(res.is_none());
    }

    /// Assign every required spillset to a spillslot, reusing existing
    /// slots of the same class when their contents do not overlap, and
    /// then give each spillslot a concrete stack location.
    pub fn allocate_spillslots(&mut self) {
        const MAX_ATTEMPTS: usize = 10;

        for spillset in 0..self.ctx.spillsets.len() {
            trace!("allocate spillslot: {}", spillset);
            let spillset = SpillSetIndex::new(spillset);
            if !self.ctx.spillsets[spillset].required {
                continue;
            }
            let class = self.ctx.spillsets[spillset].class as usize;
            // Try a few existing spillslots.
            let mut i = self.ctx.slots_by_class[class].probe_start;
            let mut success = false;
            // Never probe the same element more than once: limit the
            // attempt count to the number of slots in existence.
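            // (The probe is a first-fit scan: start at `probe_start`,
            // advance with `next_index`, and take the first slot whose
            // previously-assigned spillsets do not overlap this
            // spillset's range.)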
            for _attempt in
                0..core::cmp::min(self.ctx.slots_by_class[class].slots.len(), MAX_ATTEMPTS)
            {
                // Note: this indexing of `slots` is always valid
                // because either the `slots` list is empty and the
                // iteration limit above consequently means we don't
                // run this loop at all, or else `probe_start` is
                // in-bounds (because it is made so below when we add
                // a slot, and it always takes on the last index `i`
                // after this loop).
                let spillslot = self.ctx.slots_by_class[class].slots[i];

                if self.spillslot_can_fit_spillset(spillslot, spillset) {
                    self.allocate_spillset_to_spillslot(spillset, spillslot);
                    success = true;
                    self.ctx.slots_by_class[class].probe_start = i;
                    break;
                }

                i = self.ctx.slots_by_class[class].next_index(i);
            }

            if !success {
                // Allocate a new spillslot.
                let spillslot = SpillSlotIndex::new(self.ctx.spillslots.len());
                self.ctx.spillslots.push(SpillSlotData {
                    ranges: self.ctx.scratch_spillset_pool.pop().unwrap_or_default(),
                    alloc: Allocation::none(),
                    slots: self.func.spillslot_size(self.ctx.spillsets[spillset].class) as u32,
                });
                self.ctx.slots_by_class[class].slots.push(spillslot);
                self.ctx.slots_by_class[class].probe_start =
                    self.ctx.slots_by_class[class].slots.len() - 1;

                self.allocate_spillset_to_spillslot(spillset, spillslot);
            }
        }

        // Assign actual slot indices to spillslots.
        for i in 0..self.ctx.spillslots.len() {
            self.ctx.spillslots[i].alloc = self.allocate_spillslot(self.ctx.spillslots[i].slots);
        }

        trace!("spillslot allocator done");
    }

    /// Reserve `size` slots in the stack frame, aligned to `size`, and
    /// return a stack `Allocation` naming the new spillslot.
    pub fn allocate_spillslot(&mut self, size: u32) -> Allocation {
        let mut offset = self.ctx.output.num_spillslots as u32;
        // Align up to `size`.
        debug_assert!(size.is_power_of_two());
        offset = (offset + size - 1) & !(size - 1);
        let slot = if self.func.multi_spillslot_named_by_last_slot() {
            offset + size - 1
        } else {
            offset
        };
        offset += size;
        self.ctx.output.num_spillslots = offset as _;
        Allocation::stack(SpillSlot::new(slot as usize))
    }
}
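
// Illustrative sketch, not part of the original allocator: this test
// demonstrates the align-up arithmetic used in `allocate_spillslot`
// above, which rounds an offset up to the next multiple of a
// power-of-two `size` via `(offset + size - 1) & !(size - 1)`. The
// `align_up` helper below is hypothetical and exists only for this
// demonstration.
#[cfg(test)]
mod align_up_sketch {
    /// Hypothetical helper mirroring the align-up expression in
    /// `allocate_spillslot`.
    fn align_up(offset: u32, size: u32) -> u32 {
        debug_assert!(size.is_power_of_two());
        (offset + size - 1) & !(size - 1)
    }

    #[test]
    fn rounds_offsets_up_to_multiples_of_size() {
        // Already-aligned offsets are unchanged.
        assert_eq!(align_up(0, 4), 0);
        assert_eq!(align_up(8, 4), 8);
        // Unaligned offsets round up to the next multiple of `size`.
        assert_eq!(align_up(5, 4), 8);
        assert_eq!(align_up(9, 8), 16);
        // A size of one slot never changes the offset.
        assert_eq!(align_up(7, 1), 7);
    }
}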