@@ -85,21 +85,18 @@ std::unique_ptr<OperationPass<FuncOp>> mlir::createMemRefDataFlowOptPass() {
 // This is a straightforward implementation not optimized for speed. Optimize
 // if needed.
 void MemRefDataFlowOpt::forwardStoreToLoad(AffineLoadOp loadOp) {
-  Operation *loadOpInst = loadOp.getOperation();
-
-  // First pass over the use list to get minimum number of surrounding
+  // First pass over the use list to get the minimum number of surrounding
   // loops common between the load op and the store op, with min taken across
   // all store ops.
   SmallVector<Operation *, 8> storeOps;
-  unsigned minSurroundingLoops = getNestingDepth(*loadOpInst);
+  unsigned minSurroundingLoops = getNestingDepth(loadOp);
   for (auto *user : loadOp.getMemRef().getUsers()) {
     auto storeOp = dyn_cast<AffineStoreOp>(user);
     if (!storeOp)
       continue;
-    auto *storeOpInst = storeOp.getOperation();
-    unsigned nsLoops = getNumCommonSurroundingLoops(*loadOpInst, *storeOpInst);
+    unsigned nsLoops = getNumCommonSurroundingLoops(*loadOp, *storeOp);
     minSurroundingLoops = std::min(nsLoops, minSurroundingLoops);
-    storeOps.push_back(storeOpInst);
+    storeOps.push_back(storeOp);
   }

   // The list of store op candidates for forwarding that satisfy conditions
@@ -111,12 +108,12 @@ void MemRefDataFlowOpt::forwardStoreToLoad(AffineLoadOp loadOp) {
   // post-dominance on these. 'fwdingCandidates' are a subset of depSrcStores.
   SmallVector<Operation *, 8> depSrcStores;

-  for (auto *storeOpInst : storeOps) {
-    MemRefAccess srcAccess(storeOpInst);
-    MemRefAccess destAccess(loadOpInst);
+  for (auto *storeOp : storeOps) {
+    MemRefAccess srcAccess(storeOp);
+    MemRefAccess destAccess(loadOp);
     // Find stores that may be reaching the load.
     FlatAffineConstraints dependenceConstraints;
-    unsigned nsLoops = getNumCommonSurroundingLoops(*loadOpInst, *storeOpInst);
+    unsigned nsLoops = getNumCommonSurroundingLoops(*loadOp, *storeOp);
     unsigned d;
     // Dependences at loop depth <= minSurroundingLoops do NOT matter.
     for (d = nsLoops + 1; d > minSurroundingLoops; d--) {
@@ -130,7 +127,7 @@ void MemRefDataFlowOpt::forwardStoreToLoad(AffineLoadOp loadOp) {
       continue;

     // Stores that *may* be reaching the load.
-    depSrcStores.push_back(storeOpInst);
+    depSrcStores.push_back(storeOp);

     // 1. Check if the store and the load have mathematically equivalent
     // affine access functions; this implies that they statically refer to the
@@ -144,11 +141,11 @@ void MemRefDataFlowOpt::forwardStoreToLoad(AffineLoadOp loadOp) {
       continue;

     // 2. The store has to dominate the load op to be candidate.
-    if (!domInfo->dominates(storeOpInst, loadOpInst))
+    if (!domInfo->dominates(storeOp, loadOp))
       continue;

     // We now have a candidate for forwarding.
-    fwdingCandidates.push_back(storeOpInst);
+    fwdingCandidates.push_back(storeOp);
   }

   // 3. Of all the store op's that meet the above criteria, the store that
@@ -158,11 +155,11 @@ void MemRefDataFlowOpt::forwardStoreToLoad(AffineLoadOp loadOp) {
   // Note: this can be implemented in a cleaner way with postdominator tree
   // traversals. Consider this for the future if needed.
   Operation *lastWriteStoreOp = nullptr;
-  for (auto *storeOpInst : fwdingCandidates) {
+  for (auto *storeOp : fwdingCandidates) {
     if (llvm::all_of(depSrcStores, [&](Operation *depStore) {
-          return postDomInfo->postDominates(storeOpInst, depStore);
+          return postDomInfo->postDominates(storeOp, depStore);
         })) {
-      lastWriteStoreOp = storeOpInst;
+      lastWriteStoreOp = storeOp;
       break;
     }
   }
@@ -175,7 +172,7 @@ void MemRefDataFlowOpt::forwardStoreToLoad(AffineLoadOp loadOp) {
   // Record the memref for a later sweep to optimize away.
   memrefsToErase.insert(loadOp.getMemRef());
   // Record this to erase later.
-  loadOpsToErase.push_back(loadOpInst);
+  loadOpsToErase.push_back(loadOp);
 }

 void MemRefDataFlowOpt::runOnFunction() {
@@ -192,32 +189,31 @@ void MemRefDataFlowOpt::runOnFunction() {
   loadOpsToErase.clear();
   memrefsToErase.clear();

-  // Walk all load's and perform load/store forwarding.
+  // Walk all load's and perform store to load forwarding.
   f.walk([&](AffineLoadOp loadOp) { forwardStoreToLoad(loadOp); });

   // Erase all load op's whose results were replaced with store fwd'ed ones.
-  for (auto *loadOp : loadOpsToErase) {
+  for (auto *loadOp : loadOpsToErase)
     loadOp->erase();
-  }

   // Check if the store fwd'ed memrefs are now left with only stores and can
   // thus be completely deleted. Note: the canonicalize pass should be able
   // to do this as well, but we'll do it here since we collected these anyway.
   for (auto memref : memrefsToErase) {
     // If the memref hasn't been alloc'ed in this function, skip.
-    Operation *defInst = memref.getDefiningOp();
-    if (!defInst || !isa<AllocOp>(defInst))
+    Operation *defOp = memref.getDefiningOp();
+    if (!defOp || !isa<AllocOp>(defOp))
       // TODO(mlir-team): if the memref was returned by a 'call' operation, we
       // could still erase it if the call had no side-effects.
       continue;
-    if (llvm::any_of(memref.getUsers(), [&](Operation *ownerInst) {
-          return (!isa<AffineStoreOp>(ownerInst) && !isa<DeallocOp>(ownerInst));
+    if (llvm::any_of(memref.getUsers(), [&](Operation *ownerOp) {
+          return (!isa<AffineStoreOp>(ownerOp) && !isa<DeallocOp>(ownerOp));
         }))
       continue;

     // Erase all stores, the dealloc, and the alloc on the memref.
     for (auto *user : llvm::make_early_inc_range(memref.getUsers()))
       user->erase();
-    defInst->erase();
+    defOp->erase();
   }
 }