Commit b8b12c3

Revert "wip"
This reverts commit 914e23a.
1 parent a8615b3 commit b8b12c3

2 files changed (+1, -220 lines)

compiler/rustc_middle/src/mir/basic_blocks.rs

Lines changed: 0 additions & 112 deletions
@@ -33,93 +33,6 @@ struct Cache {
     predecessors: OnceLock<Predecessors>,
     reverse_postorder: OnceLock<Vec<BasicBlock>>,
     dominators: OnceLock<Dominators<BasicBlock>>,
-    is_cyclic: OnceLock<bool>,
-    sccs: OnceLock<SccData>,
-}
-
-#[derive(Clone, Default, Debug)]
-pub struct SccData {
-    pub component_count: usize,
-
-    /// The SCC of each block.
-    pub components: IndexVec<BasicBlock, u32>,
-
-    /// The contents of each SCC: its blocks, in RPO.
-    pub sccs: Vec<SmallVec<[BasicBlock; 2]>>,
-}
-
-use std::collections::VecDeque;
-
-struct PearceRecursive {
-    r_index: IndexVec<BasicBlock, u32>,
-    stack: VecDeque<BasicBlock>,
-    index: u32,
-    c: u32,
-}
-
-impl PearceRecursive {
-    fn new(node_count: usize) -> Self {
-        assert!(node_count > 0); // only a non-empty graph is supported
-        // todo: assert node_count is within overflow limits
-        Self {
-            r_index: IndexVec::from_elem_n(0, node_count),
-            stack: VecDeque::new(),
-            index: 1,
-            c: node_count.try_into().unwrap(),
-            // c: node_count - 1,
-        }
-    }
-
-    fn compute_sccs(&mut self, blocks: &IndexVec<BasicBlock, BasicBlockData<'_>>) {
-        for v in blocks.indices() {
-            if self.r_index[v] == 0 {
-                self.visit(v, blocks);
-            }
-        }
-
-        // The SCC labels are from N - 1 to zero, remap them from 0 to the component count, to match
-        // their position in an array of SCCs.
-        let node_count: u32 = blocks.len().try_into().unwrap();
-        for scc_index in self.r_index.iter_mut() {
-            *scc_index = node_count - *scc_index - 1;
-        }
-
-        // Adjust the component index counter to the component count
-        self.c = node_count - self.c;
-    }
-
-    fn visit(&mut self, v: BasicBlock, blocks: &IndexVec<BasicBlock, BasicBlockData<'_>>) {
-        let mut root = true;
-        self.r_index[v] = self.index;
-        self.index += 1;
-
-        for w in blocks[v].terminator().successors() {
-            if self.r_index[w] == 0 {
-                self.visit(w, blocks);
-            }
-            if self.r_index[w] < self.r_index[v] {
-                self.r_index[v] = self.r_index[w];
-                root = false;
-            }
-        }
-
-        if root {
-            self.index -= 1;
-            self.c -= 1;
-
-            while let Some(&w) = self.stack.front()
-                && self.r_index[v] <= self.r_index[w]
-            {
-                self.stack.pop_front();
-                self.r_index[w] = self.c;
-                self.index -= 1;
-            }
-
-            self.r_index[v] = self.c;
-        } else {
-            self.stack.push_front(v);
-        }
-    }
 }
 
 impl<'tcx> BasicBlocks<'tcx> {
@@ -128,35 +41,10 @@ impl<'tcx> BasicBlocks<'tcx> {
         BasicBlocks { basic_blocks, cache: Cache::default() }
     }
 
-    /// Returns true if control-flow graph contains a cycle reachable from the `START_BLOCK`.
-    #[inline]
-    pub fn is_cfg_cyclic(&self) -> bool {
-        *self.cache.is_cyclic.get_or_init(|| graph::is_cyclic(self))
-    }
-
-    #[inline]
     pub fn dominators(&self) -> &Dominators<BasicBlock> {
         self.cache.dominators.get_or_init(|| dominators(self))
     }
 
-    #[inline]
-    pub fn sccs(&self) -> &SccData {
-        self.cache.sccs.get_or_init(|| {
-            let block_count = self.basic_blocks.len();
-
-            let mut pearce = PearceRecursive::new(block_count);
-            pearce.compute_sccs(&self.basic_blocks);
-            let component_count = pearce.c as usize;
-
-            let mut sccs = vec![smallvec::SmallVec::new(); component_count];
-            for &block in self.reverse_postorder().iter() {
-                let scc = pearce.r_index[block] as usize;
-                sccs[scc].push(block);
-            }
-            SccData { component_count, components: pearce.r_index, sccs }
-        })
-    }
-
     /// Returns predecessors for each basic block.
     #[inline]
     pub fn predecessors(&self) -> &Predecessors {
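
For reference, the following is a minimal, self-contained sketch of the Pearce-style SCC computation that the reverted `PearceRecursive` and `sccs()` code implemented, adapted to a plain adjacency-list graph (`Vec<Vec<usize>>`) instead of MIR basic blocks. The `Pearce` struct, its method names, and the toy graph in `main` are illustrative assumptions rather than rustc APIs; the index bookkeeping and the final label remapping mirror the removed code.

// Standalone sketch (illustrative, not a rustc API): Pearce-style SCC
// computation over a plain adjacency-list graph.
use std::collections::VecDeque;

struct Pearce {
    r_index: Vec<u32>,      // 0 = unvisited; then visit index, later the SCC label
    stack: VecDeque<usize>, // visited vertices whose SCC is not yet decided
    index: u32,             // next visitation index, starts at 1 (0 means unvisited)
    c: u32,                 // next SCC label, counting down from the node count
}

impl Pearce {
    fn new(node_count: usize) -> Self {
        assert!(node_count > 0); // only non-empty graphs are supported
        Pearce { r_index: vec![0; node_count], stack: VecDeque::new(), index: 1, c: node_count as u32 }
    }

    fn compute(&mut self, graph: &[Vec<usize>]) {
        for v in 0..graph.len() {
            if self.r_index[v] == 0 {
                self.visit(v, graph);
            }
        }
        // Labels were assigned from N - 1 downward; remap them to 0..component_count
        // so they can index an array of SCCs, as the reverted code did.
        let n = graph.len() as u32;
        for label in self.r_index.iter_mut() {
            *label = n - *label - 1;
        }
        self.c = n - self.c; // `c` now holds the component count
    }

    fn visit(&mut self, v: usize, graph: &[Vec<usize>]) {
        let mut root = true;
        self.r_index[v] = self.index;
        self.index += 1;

        for &w in &graph[v] {
            if self.r_index[w] == 0 {
                self.visit(w, graph);
            }
            if self.r_index[w] < self.r_index[v] {
                self.r_index[v] = self.r_index[w];
                root = false;
            }
        }

        if root {
            // `v` is the root of an SCC: pop every vertex of the component and label it.
            self.index -= 1;
            self.c -= 1;
            while let Some(&w) = self.stack.front() {
                if self.r_index[v] > self.r_index[w] {
                    break;
                }
                self.stack.pop_front();
                self.r_index[w] = self.c;
                self.index -= 1;
            }
            self.r_index[v] = self.c;
        } else {
            self.stack.push_front(v);
        }
    }
}

fn main() {
    // 0 -> 1 -> 2 -> 0 is a cycle; 2 -> 3 leaves it.
    let graph = vec![vec![1], vec![2], vec![0, 3], vec![]];
    let mut pearce = Pearce::new(graph.len());
    pearce.compute(&graph);
    // Two components: {0, 1, 2} and {3}; prints 2 and [1, 1, 1, 0].
    println!("component count: {}", pearce.c);
    println!("labels: {:?}", pearce.r_index);
}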

compiler/rustc_mir_dataflow/src/framework/mod.rs

Lines changed: 1 addition & 108 deletions
@@ -229,27 +229,6 @@ pub trait Analysis<'tcx> {
         unreachable!();
     }
 
-    #[inline]
-    fn iterate_to_fixpoint<'mir>(
-        self,
-        tcx: TyCtxt<'tcx>,
-        body: &'mir mir::Body<'tcx>,
-        pass_name: Option<&'static str>,
-    ) -> AnalysisAndResults<'tcx, Self>
-    where
-        Self: Sized,
-        Self::Domain: DebugWithContext<Self>,
-    {
-        // Computing dataflow over the SCCs is only supported in forward analyses. It's also
-        // unnecessary to use it on acyclic graphs, as the condensation graph is of course the same
-        // as the CFG itself.
-        if Self::Direction::IS_BACKWARD || !body.basic_blocks.is_cfg_cyclic() {
-            self.iterate_to_fixpoint_per_block(tcx, body, pass_name)
-        } else {
-            self.iterate_to_fixpoint_per_scc(tcx, body, pass_name)
-        }
-    }
-
     /* Extension methods */
 
     /// Finds the fixpoint for this dataflow problem.
@@ -265,7 +244,7 @@ pub trait Analysis<'tcx> {
     /// dataflow analysis. Some analyses are run multiple times in the compilation pipeline.
     /// Without a `pass_name` to differentiates them, only the results for the latest run will be
     /// saved.
-    fn iterate_to_fixpoint_per_block<'mir>(
+    fn iterate_to_fixpoint<'mir>(
         mut self,
         tcx: TyCtxt<'tcx>,
         body: &'mir mir::Body<'tcx>,
@@ -329,92 +308,6 @@ pub trait Analysis<'tcx> {
 
         AnalysisAndResults { analysis: self, results }
     }
-
-    fn iterate_to_fixpoint_per_scc<'mir>(
-        mut self,
-        _tcx: TyCtxt<'tcx>,
-        body: &'mir mir::Body<'tcx>,
-        _pass_name: Option<&'static str>,
-    ) -> AnalysisAndResults<'tcx, Self>
-    where
-        Self: Sized,
-        Self::Domain: DebugWithContext<Self>,
-    {
-        assert!(Self::Direction::IS_FORWARD);
-
-        let sccs = body.basic_blocks.sccs();
-
-        struct VecQueue<T: Idx> {
-            queue: Vec<T>,
-            set: DenseBitSet<T>,
-        }
-
-        impl<T: Idx> VecQueue<T> {
-            #[inline]
-            fn with_none(len: usize) -> Self {
-                VecQueue { queue: Vec::with_capacity(len), set: DenseBitSet::new_empty(len) }
-            }
-
-            #[inline]
-            fn insert(&mut self, element: T) {
-                if self.set.insert(element) {
-                    self.queue.push(element);
-                }
-            }
-        }
-
-        let mut scc_queue = VecQueue::with_none(sccs.component_count);
-        for &bb in body.basic_blocks.reverse_postorder().iter() {
-            // let scc = sccs.components[bb.as_usize()];
-            let scc = sccs.components[bb];
-            scc_queue.insert(scc);
-        }
-        // assert_eq!(scc_queue.queue, sccs.queue);
-
-        let mut results = IndexVec::from_fn_n(|_| self.bottom_value(body), body.basic_blocks.len());
-        self.initialize_start_block(body, &mut results[mir::START_BLOCK]);
-
-        // Worklist for per-SCC iterations
-        let mut dirty_queue: WorkQueue<BasicBlock> = WorkQueue::with_none(body.basic_blocks.len());
-
-        let mut state = self.bottom_value(body);
-
-        for &scc in &scc_queue.queue {
-            // Blocks must be added in RPO.
-            // for block in sccs.blocks_in_rpo(scc as usize) {
-            for block in sccs.sccs[scc as usize].iter().copied() {
-                dirty_queue.insert(block);
-            }
-
-            while let Some(bb) = dirty_queue.pop() {
-                // Set the state to the entry state of the block. This is equivalent to `state =
-                // results[bb].clone()`, but it saves an allocation, thus improving compile times.
-                state.clone_from(&results[bb]);
-
-                Self::Direction::apply_effects_in_block(
-                    &mut self,
-                    body,
-                    &mut state,
-                    bb,
-                    &body[bb],
-                    |target: BasicBlock, state: &Self::Domain| {
-                        let set_changed = results[target].join(state);
-                        // let target_scc = sccs.components[target.as_usize()];
-                        let target_scc = sccs.components[target];
-                        if set_changed && target_scc == scc {
-                            // The target block is in the SCC we're currently processing, and we
-                            // want to process this block until fixpoint. Otherwise, the target
-                            // block is in a successor SCC and it will be processed when that SCC is
-                            // encountered later.
-                            dirty_queue.insert(target);
-                        }
-                    },
-                );
-            }
-        }
-
-        AnalysisAndResults { analysis: self, results }
-    }
 }
 
 /// The legal operations for a transfer function in a gen/kill problem.
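
To make the scheduling in the removed `iterate_to_fixpoint_per_scc` concrete, here is a small, self-contained sketch of the same idea on a toy problem: SCCs are visited in the order their first block appears in reverse postorder, and within each SCC a worklist runs until the per-block states stop changing; only targets inside the current SCC are re-queued. The graph, the hard-coded SCC assignment and RPO, and the bitmask "analysis" are illustrative assumptions, not the rustc dataflow framework types.

// Sketch of per-SCC fixpoint scheduling (illustrative; not the rustc dataflow framework).
use std::collections::VecDeque;

fn main() {
    // CFG: 0 -> 1, 1 -> 2, 2 -> 1 (a loop), 2 -> 3.
    let succs: Vec<Vec<usize>> = vec![vec![1], vec![2], vec![1, 3], vec![]];
    // Hand-computed for this graph: SCCs {0}, {1, 2}, {3}, labeled so that an edge
    // never goes from a higher-numbered SCC to a lower-numbered one.
    let scc_of: Vec<usize> = vec![0, 1, 1, 2];
    let rpo: Vec<usize> = vec![0, 1, 2, 3];

    // Queue of SCCs, in the order their first block appears along the RPO.
    let mut scc_queue: Vec<usize> = Vec::new();
    for &b in &rpo {
        if !scc_queue.contains(&scc_of[b]) {
            scc_queue.push(scc_of[b]);
        }
    }

    // Toy domain: a bitmask of blocks seen on some path into each block; join is bitwise OR.
    let mut results: Vec<u64> = vec![0; succs.len()];
    results[0] = 1 << 0; // the start block's entry state contains only itself

    for &scc in &scc_queue {
        // Seed the worklist with the SCC's blocks, in RPO.
        let mut dirty: VecDeque<usize> =
            rpo.iter().copied().filter(|&b| scc_of[b] == scc).collect();
        while let Some(b) = dirty.pop_front() {
            // "Transfer function": whatever reaches `b`, plus `b` itself, flows to each successor.
            let out = results[b] | (1 << b);
            for &t in &succs[b] {
                let joined = results[t] | out;
                let changed = joined != results[t];
                results[t] = joined;
                // Only re-queue targets in the current SCC; successor SCCs get their
                // turn later in `scc_queue`, as in the reverted code.
                if changed && scc_of[t] == scc && !dirty.contains(&t) {
                    dirty.push_back(t);
                }
            }
        }
    }

    // Expected: blocks 1, 2, and 3 end up reached by {0, 1, 2} (mask 0b0111).
    for (b, mask) in results.iter().enumerate() {
        println!("block {b}: entry mask {mask:#06b}");
    }
}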
