Commit 9e2c054

Add the MIR utils used by redundant_clone
1 parent 3505eba commit 9e2c054

7 files changed, +1895 -0 lines changed

clippy_mir/Cargo.toml

Lines changed: 12 additions & 0 deletions
[package]
name = "clippy_mir"
version = "0.0.1"
edition = "2024"

[dependencies]
clippy_data_structures = { path = "../clippy_data_structures" }
indexmap = "2.0.0"

[package.metadata.rust-analyzer]
# This package uses #[feature(rustc_private)]
rustc_private = true
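
The `rust-analyzer` metadata above signals that this crate links against compiler-internal crates. The crate root that actually enables this is among the five changed files not shown in this excerpt; as a rough sketch only (the exact contents are an assumption, inferred from the imports used in `analysis.rs`), it would look something like:

#![feature(rustc_private)]
// Hypothetical `src/lib.rs` prelude: a `rustc_private` crate must both enable
// the feature and name each compiler crate it uses with `extern crate`.
extern crate rustc_arena;
extern crate rustc_index;
extern crate rustc_middle;

pub mod analysis;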

clippy_mir/src/analysis.rs

Lines changed: 287 additions & 0 deletions
//! A simple framework for running dataflow analyses on the basic block graphs of MIR bodies.
//!
//! The main entry point is `run_analysis`, which requires a few things to be set up first:
//!
//! * A `BlockOrderMap`. This defines the order in which the analysis will check blocks.
//! * A `WorkQueue`. Used by the analysis to track which blocks still need to be analyzed.
//! * The graph edge list. Used by the analysis to know which blocks to transfer the result of an
//!   analyzed block to.
//! * An `Analysis` impl. This defines the state type, each block's state transformation function
//!   and the transfer function.
//!
//! Dataflow analysis starts by giving each node in a directed graph (basic blocks in this case)
//! an initial state and filling a work queue with every node. For each node in the queue a few
//! steps are taken:
//!
//! * The node is removed from the queue.
//! * The node's transformation function takes the node's current state and produces a new state.
//!   Note that this does not modify the node's stored state; it only computes a new one.
//! * For each immediate successor node, a transfer function modifies the successor's state using
//!   the previously computed state.
//! * Each successor node whose state changed is added to the work queue if it is not already
//!   there.
//!
//! Once there are no nodes left to take from the work queue the analysis is complete.

use clippy_data_structures::{SliceSet, bit_slice};
use core::cmp::minmax;
use core::iter;
use rustc_arena::DroplessArena;
use rustc_index::IndexSlice;
use rustc_middle::mir::{BasicBlock, Body, TerminatorKind, UnwindAction};

rustc_index::newtype_index! {
    /// A reordered block index.
    #[orderable]
    pub struct OrderedBlock {}
}

/// Bi-directional mapping to reorder blocks.
pub struct BlockOrderMap<'a> {
    from_ordered: &'a IndexSlice<OrderedBlock, BasicBlock>,
    to_ordered: &'a IndexSlice<BasicBlock, OrderedBlock>,
}
impl<'a> BlockOrderMap<'a> {
    /// Creates a new mapping for a reverse postorder ordering.
    pub fn new_reverse_postorder(arena: &'a DroplessArena, body: &'a Body<'_>) -> Self {
        let from_ordered = IndexSlice::<OrderedBlock, _>::from_raw(body.basic_blocks.reverse_postorder());
        let to_ordered = IndexSlice::<BasicBlock, _>::from_raw_mut(
            arena.alloc_from_iter(iter::repeat_with(|| OrderedBlock::ZERO).take(from_ordered.len())),
        );
        for (x, &y) in from_ordered.iter_enumerated() {
            to_ordered[y] = x;
        }

        Self {
            from_ordered,
            to_ordered,
        }
    }

    #[inline]
    #[must_use]
    pub fn to_ordered(&self) -> &'a IndexSlice<BasicBlock, OrderedBlock> {
        self.to_ordered
    }

    #[inline]
    #[must_use]
    #[expect(clippy::wrong_self_convention)]
    pub fn from_ordered(&self) -> &'a IndexSlice<OrderedBlock, BasicBlock> {
        self.from_ordered
    }
}

/// Queue that will remove blocks in order.
pub struct WorkQueue<'arena> {
    queue: &'arena mut [bit_slice::Word],
    word: bit_slice::Word,
    offset: u32,
    domain_size: u32,
}
impl<'arena> WorkQueue<'arena> {
    /// Creates a new empty queue for the given body.
    #[expect(clippy::cast_possible_truncation)]
    pub fn new(arena: &'arena DroplessArena, body: &Body<'_>) -> Self {
        Self {
            queue: arena.alloc_from_iter(iter::repeat_n(
                0,
                bit_slice::word_count_from_bits(body.basic_blocks.len()),
            )),
            word: 0,
            offset: 0,
            domain_size: body.basic_blocks.len() as u32,
        }
    }

    /// Fills the queue with all blocks.
    fn fill(&mut self) {
        self.queue.fill(!0);
        if let Some(word) = self.queue.last_mut() {
            *word &= bit_slice::final_mask_for_size(self.domain_size as usize);
        }
        self.offset = 0;
        self.word = self.queue.first().copied().unwrap_or(0);
    }

    /// Extracts the next block in the queue.
    #[expect(clippy::cast_possible_truncation)]
    fn next(&mut self) -> Option<OrderedBlock> {
        if self.word == 0 {
            self.queue[self.offset as usize] = 0;
            self.offset += self.queue[self.offset as usize + 1..].iter().position(|&x| x != 0)? as u32 + 1;
            self.word = self.queue[self.offset as usize];
        }
        let bit = self.word.trailing_zeros() as usize;
        self.word ^= 1 << bit;
        Some(OrderedBlock::from_usize(
            bit | (self.offset as usize * bit_slice::WORD_BITS),
        ))
    }

    /// Inserts a single block into the queue.
    #[track_caller]
    #[expect(clippy::cast_possible_truncation)]
    pub fn insert(&mut self, block: OrderedBlock) {
        debug_assert!(block.as_u32() < self.domain_size);
        let word = block.as_u32() / bit_slice::WORD_BITS as u32;
        let bit = 1 << (block.as_usize() % bit_slice::WORD_BITS);

        // Flush the cached word before moving `offset`, then reload the cache
        // from the (possibly lowered) offset, as `insert_sorted` does. Using
        // `|=` here would smear stale bits from the old offset's word into the
        // new one.
        self.queue[self.offset as usize] = self.word;
        self.queue[word as usize] |= bit;
        self.offset = self.offset.min(word);
        self.word = self.queue[self.offset as usize];
    }

    /// Inserts a sorted sequence of blocks into the queue.
    #[track_caller]
    #[expect(clippy::cast_possible_truncation)]
    pub fn insert_sorted(&mut self, blocks: impl IntoIterator<Item = OrderedBlock>) {
        let mut blocks = blocks.into_iter();
        let Some(block) = blocks.next() else {
            return;
        };
        debug_assert!(block.as_u32() < self.domain_size);
        let word = block.as_u32() / bit_slice::WORD_BITS as u32;
        let bit = 1 << (block.as_usize() % bit_slice::WORD_BITS);

        self.queue[self.offset as usize] = self.word;
        self.offset = self.offset.min(word);

        self.queue[word as usize] |= bit;
        for block in blocks {
            debug_assert!(block.as_u32() < self.domain_size);
            let idx = block.as_usize() / bit_slice::WORD_BITS;
            let bit = 1 << (block.as_usize() % bit_slice::WORD_BITS);
            self.queue[idx] |= bit;
        }

        self.word = self.queue[self.offset as usize];
    }
}

/// Extracts the body's edges and orders them via the block map.
pub fn get_body_edges<'arena>(
    arena: &'arena DroplessArena,
    body: &Body<'_>,
    block_map: &BlockOrderMap<'_>,
) -> &'arena IndexSlice<OrderedBlock, &'arena SliceSet<OrderedBlock>> {
    let blocks = IndexSlice::<OrderedBlock, _>::from_raw_mut(
        arena.alloc_from_iter(iter::repeat_with(SliceSet::empty).take(body.basic_blocks.len())),
    );
    for (block, block_data) in body.basic_blocks.iter_enumerated() {
        blocks[block_map.to_ordered[block]] = match block_data.terminator().kind {
            TerminatorKind::Drop {
                target,
                unwind: UnwindAction::Cleanup(cleanup),
                ..
            }
            | TerminatorKind::Call {
                target: Some(target),
                unwind: UnwindAction::Cleanup(cleanup),
                ..
            }
            | TerminatorKind::Assert {
                target,
                unwind: UnwindAction::Cleanup(cleanup),
                ..
            }
            | TerminatorKind::Yield {
                resume: target,
                drop: Some(cleanup),
                ..
            } => SliceSet::from_sorted(
                arena.alloc_from_iter(minmax(block_map.to_ordered[target], block_map.to_ordered[cleanup])),
            ),

            TerminatorKind::Goto { target }
            | TerminatorKind::Drop { target, .. }
            | TerminatorKind::Assert { target, .. }
            | TerminatorKind::Call {
                target: Some(target), ..
            }
            | TerminatorKind::Call {
                unwind: UnwindAction::Cleanup(target),
                ..
            }
            | TerminatorKind::Yield { resume: target, .. }
            | TerminatorKind::FalseEdge {
                real_target: target, ..
            }
            | TerminatorKind::FalseUnwind {
                real_target: target, ..
            } => SliceSet::from_ref(arena.alloc(block_map.to_ordered[target])),

            TerminatorKind::SwitchInt { ref targets, .. } => SliceSet::from_unsorted_slice_dedup(
                arena.alloc_from_iter(targets.all_targets().iter().map(|&target| block_map.to_ordered[target])),
            ),

            TerminatorKind::InlineAsm {
                ref targets, unwind, ..
            } => {
                let targets = targets.iter().map(|&target| block_map.to_ordered[target]);
                SliceSet::from_unsorted_slice(if let UnwindAction::Cleanup(cleanup) = unwind {
                    arena.alloc_from_iter(targets.chain([block_map.to_ordered[cleanup]]))
                } else {
                    arena.alloc_from_iter(targets)
                })
            },

            TerminatorKind::UnwindResume
            | TerminatorKind::UnwindTerminate(_)
            | TerminatorKind::Return
            | TerminatorKind::Unreachable
            | TerminatorKind::TailCall { .. }
            | TerminatorKind::Call { .. }
            | TerminatorKind::CoroutineDrop => SliceSet::empty(),
        };
    }
    blocks
}

pub trait Analysis {
    /// The state type stored for each node in the graph.
    type Domain;

    /// Creates a copy of a block's entry state before calling `apply_block_transform`.
    fn clone_block_entry(&mut self, src: &Self::Domain, dst: &mut Self::Domain, block: OrderedBlock);

    /// Transfers the computed value from a previous block to the entry state of a successor block
    /// and returns whether the successor block's state has changed.
    fn transfer_domain(
        &mut self,
        src: &Self::Domain,
        dst: &mut Self::Domain,
        src_block: OrderedBlock,
        dst_block: OrderedBlock,
    ) -> bool;

    /// Applies the transformation function of a block to its entry state.
    fn apply_block_transform(&mut self, state: &mut Self::Domain, block: OrderedBlock);
}

/// Runs an analysis until it reaches a fixed point.
///
/// See the module documentation for details.
pub fn run_analysis<A: Analysis>(
    queue: &mut WorkQueue,
    edges: &IndexSlice<OrderedBlock, &SliceSet<OrderedBlock>>,
    states: &mut IndexSlice<OrderedBlock, A::Domain>,
    tmp_state: &mut A::Domain,
    analysis: &mut A,
) {
    debug_assert_eq!(queue.domain_size as usize, edges.len());
    debug_assert_eq!(queue.domain_size as usize, states.len());

    queue.fill();
    while let Some(block) = queue.next() {
        analysis.clone_block_entry(&states[block], tmp_state, block);
        analysis.apply_block_transform(tmp_state, block);
        queue.insert_sorted(
            edges[block]
                .iter()
                .copied()
                .filter(|&dst_block| analysis.transfer_domain(tmp_state, &mut states[dst_block], block, dst_block)),
        );
    }
}
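
None of the files in this commit exercises the framework end to end, so the following is an illustrative sketch only: a trivial forward reachability analysis written against the API above. `Reachability`, `reachable_blocks`, and the `clippy_mir::analysis` module path are hypothetical names; the trait methods and driver signatures follow the code in this file.

use rustc_arena::DroplessArena;
use rustc_index::IndexVec;
use rustc_middle::mir::Body;

use clippy_mir::analysis::{
    Analysis, BlockOrderMap, OrderedBlock, WorkQueue, get_body_edges, run_analysis,
};

/// Hypothetical analysis: `true` means "reachable from the entry block".
struct Reachability;

impl Analysis for Reachability {
    type Domain = bool;

    fn clone_block_entry(&mut self, src: &bool, dst: &mut bool, _: OrderedBlock) {
        *dst = *src;
    }

    fn transfer_domain(&mut self, src: &bool, dst: &mut bool, _: OrderedBlock, _: OrderedBlock) -> bool {
        // A successor becomes reachable once any reachable predecessor
        // transfers into it; report a change only on the false -> true edge.
        let changed = *src && !*dst;
        *dst |= *src;
        changed
    }

    // Reachability has no per-block effect; a block's entry state flows
    // unchanged to its successors.
    fn apply_block_transform(&mut self, _: &mut bool, _: OrderedBlock) {}
}

/// Hypothetical driver wiring the pieces together for one MIR body.
fn reachable_blocks(body: &Body<'_>) -> IndexVec<OrderedBlock, bool> {
    let arena = DroplessArena::default();
    let block_map = BlockOrderMap::new_reverse_postorder(&arena, body);
    let mut queue = WorkQueue::new(&arena, body);
    let edges = get_body_edges(&arena, body, &block_map);

    // Seed the entry states; in reverse postorder the entry block is first.
    let mut states = IndexVec::from_elem_n(false, body.basic_blocks.len());
    states[OrderedBlock::ZERO] = true;

    let mut tmp_state = false;
    run_analysis(&mut queue, edges, &mut states, &mut tmp_state, &mut Reachability);
    states
}

The caller owns all state here: it seeds the per-block entry states before calling `run_analysis`, which then iterates until the work queue drains, i.e. until no `transfer_domain` call reports a change.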
