 #include "swift/SIL/SILInstruction.h"
 #include "swift/SIL/SILModule.h"
 #include "swift/SIL/SILUndef.h"
+#include "swift/SIL/MemAccessUtils.h"
 #include "swift/SILOptimizer/Analysis/ArraySemantic.h"
 #include "swift/SILOptimizer/PassManager/Passes.h"
 #include "swift/SILOptimizer/PassManager/Transforms.h"
@@ -248,18 +249,112 @@ static bool canZapInstruction(SILInstruction *Inst, bool acceptRefCountInsts,
   return false;
 }
 
+/// Returns true if all stores in \p users store to the tail elements of
+/// \p allocRef, which are destroyed by the \p destroyArray builtin.
+static bool onlyStoresToTailObjects(BuiltinInst *destroyArray,
+                                    const UserList &users,
+                                    AllocRefInst *allocRef) {
+  // Get the number of destroyed elements.
+  auto *literal = dyn_cast<IntegerLiteralInst>(destroyArray->getArguments()[2]);
+  if (!literal || literal->getValue().getMinSignedBits() > 32)
+    return false;
+  int numDestroyed = literal->getValue().getSExtValue();
+
+  SILFunction *func = destroyArray->getFunction();
+  SILBasicBlock *storesBlock = nullptr;
+
+  // Check if the destroyArray destroys the tail elements of allocRef.
+  auto destroyPath = AccessPath::compute(destroyArray->getArguments()[1]);
+  if (destroyPath != AccessPath::forTailStorage(allocRef))
+    return false;
+
+  SmallVector<AccessPath, 32> pathsToCheck;
+
+  // Check all stores to the tail elements.
+  for (SILInstruction *user : users) {
+    auto *store = dyn_cast<StoreInst>(user);
+    if (!store)
+      continue;
+
+    assert(users.count(store->getSrc()->getDefiningInstruction()) == 0 &&
+           "Storing a use of an array (that would mean the array escapes)?");
+
+    // All stores must be in the same block. This ensures that the stores
+    // dominate the destroyArray (which may be in a different block).
+    if (storesBlock && store->getParent() != storesBlock)
+      return false;
+    storesBlock = store->getParent();
+
+    AccessPath storePath = AccessPath::compute(store->getDest());
+    if (!storePath.isValid())
+      return false;
+
+    // We don't care about trivial stores.
+    if (store->getSrc()->getType().isTrivial(*func))
+      continue;
+
+    // Check if it's a store to the tail elements.
+    if (!destroyPath.contains(storePath.withOffset(0)))
+      return false;
+
+    // Check if the store is within the range of the destroyed array. In OSSA
+    // we would not need this check. Otherwise it would be a memory lifetime
+    // failure.
+    if (storePath.getOffset() < 0 || storePath.getOffset() >= numDestroyed)
+      return false;
+
+    pathsToCheck.push_back(storePath);
+  }
+
+  // In non-OSSA we have to check if two paths overlap, because we could end up
+  // over-releasing the stored objects.
+  // Group the paths by tail-element index, so that we only have to check
+  // within a tail-element group.
+  std::sort(pathsToCheck.begin(), pathsToCheck.end(),
+            [](AccessPath p1, AccessPath p2) {
+    return p1.getOffset() < p2.getOffset();
+  });
+  for (unsigned i = 0, n = pathsToCheck.size(); i < n; ++i) {
+    for (unsigned j = i + 1;
+         j < n && pathsToCheck[i].getOffset() == pathsToCheck[j].getOffset();
+         ++j) {
+      if (pathsToCheck[i].mayOverlap(pathsToCheck[j]))
+        return false;
+      // Limit the number of checks to avoid quadratic complexity.
+      if (j > i + 8)
+        return false;
+    }
+  }
+  return true;
+}
+
+static bool isDestroyArray(SILInstruction *inst) {
+  BuiltinInst *bi = dyn_cast<BuiltinInst>(inst);
+  return bi && bi->getBuiltinInfo().ID == BuiltinValueKind::DestroyArray;
+}
+
+/// Inserts releases of all stores in \p users.
+static void insertCompensatingReleases(SILInstruction *before,
+                                       const UserList &users) {
+  for (SILInstruction *user : users) {
+    if (auto *store = dyn_cast<StoreInst>(user)) {
+      createDecrementBefore(store->getSrc(), before);
+    }
+  }
+}
+
 /// Analyze the use graph of AllocRef for any uses that would prevent us from
 /// zapping it completely.
 static bool
-hasUnremovableUsers(SILInstruction *AllocRef, UserList *Users,
+hasUnremovableUsers(SILInstruction *allocation, UserList *Users,
                     bool acceptRefCountInsts, bool onlyAcceptTrivialStores) {
   SmallVector<SILInstruction *, 16> Worklist;
-  Worklist.push_back(AllocRef);
+  Worklist.push_back(allocation);
 
   LLVM_DEBUG(llvm::dbgs() << "Analyzing Use Graph.");
 
   SmallVector<RefElementAddrInst *, 8> refElementAddrs;
   bool deallocationMaybeInlined = false;
+  BuiltinInst *destroyArray = nullptr;
+  AllocRefInst *allocRef = dyn_cast<AllocRefInst>(allocation);
 
   while (!Worklist.empty()) {
     SILInstruction *I = Worklist.pop_back_val();
@@ -273,17 +368,19 @@ hasUnremovableUsers(SILInstruction *AllocRef, UserList *Users,
       continue;
     }
 
-    // If we can't zap this instruction... bail...
-    if (!canZapInstruction(I, acceptRefCountInsts, onlyAcceptTrivialStores)) {
-      LLVM_DEBUG(llvm::dbgs() << "Found instruction we can't zap...\n");
-      return true;
-    }
-
     if (auto *rea = dyn_cast<RefElementAddrInst>(I)) {
       if (!rea->getType().isTrivial(*rea->getFunction()))
         refElementAddrs.push_back(rea);
     } else if (isa<SetDeallocatingInst>(I)) {
       deallocationMaybeInlined = true;
+    } else if (allocRef && Users && isDestroyArray(I)) {
+      if (destroyArray)
+        return true;
+      destroyArray = cast<BuiltinInst>(I);
+    } else if (!canZapInstruction(I, acceptRefCountInsts,
+                                  onlyAcceptTrivialStores)) {
+      LLVM_DEBUG(llvm::dbgs() << "Found instruction we can't zap...\n");
+      return true;
     }
 
     // At this point, we can remove the instruction as long as all of its users
@@ -309,6 +406,12 @@ hasUnremovableUsers(SILInstruction *AllocRef, UserList *Users,
     }
   }
 
+  // In OSSA, we don't have to do this check. We can always accept a
+  // destroyArray and insert the compensating destroys right at the store
+  // instructions.
+  if (destroyArray)
+    return !onlyStoresToTailObjects(destroyArray, *Users, allocRef);
+
   if (deallocationMaybeInlined) {
     // The alloc_ref is not destructed by a strong_release which is calling the
     // deallocator (destroying all stored properties).
@@ -767,6 +870,11 @@ bool DeadObjectElimination::processAllocRef(AllocRefInst *ARI) {
     return false;
   }
 
+  auto iter = std::find_if(UsersToRemove.begin(), UsersToRemove.end(),
+                           isDestroyArray);
+  if (iter != UsersToRemove.end())
+    insertCompensatingReleases(*iter, UsersToRemove);
+
   // Remove the AllocRef and all of its users.
   removeInstructions(
     ArrayRef<SILInstruction*>(UsersToRemove.begin(), UsersToRemove.end()));
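Aside, not part of the patch: the overlap check in onlyStoresToTailObjects sorts the access paths by tail-element offset and then compares only paths that share an offset, bailing out after a handful of comparisons so the check never degrades to quadratic time. Below is a minimal standalone sketch of that pattern; the Path struct and mayOverlap() are hypothetical stand-ins for AccessPath and AccessPath::mayOverlap, not the compiler's types.

```cpp
#include <algorithm>
#include <vector>

// Hypothetical stand-in for AccessPath: a tail-element offset (the group
// key) plus a sub-object range used by the overlap test.
struct Path {
  int offset;
  int lo, hi;
};

static bool mayOverlap(const Path &a, const Path &b) {
  return a.lo <= b.hi && b.lo <= a.hi;
}

// Conservatively returns false ("possible overlap") when two paths in the
// same offset group overlap, or when a group is too large to check cheaply,
// mirroring the j > i + 8 bail-out in the patch.
static bool provablyDisjoint(std::vector<Path> &paths) {
  std::sort(paths.begin(), paths.end(),
            [](const Path &p1, const Path &p2) { return p1.offset < p2.offset; });
  for (unsigned i = 0, n = paths.size(); i < n; ++i) {
    for (unsigned j = i + 1; j < n && paths[i].offset == paths[j].offset; ++j) {
      if (mayOverlap(paths[i], paths[j]))
        return false;
      if (j > i + 8)
        return false;  // give up rather than do quadratic work
    }
  }
  return true;
}
```

Giving up conservatively only means the pass leaves the allocation alive; correctness never depends on the comparison cap.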