DeadObjectElimination: delete dead arrays for which the "destroyArray" builtin is inlined. #35737
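This change teaches DeadObjectElimination to remove a dead alloc_ref even when the inlined array deallocation has left a `destroyArray` builtin among its uses: the builtin is accepted if every non-trivial store goes to the tail elements it destroys, and the pass then inserts compensating releases for the stored values. A hypothetical Swift example of the kind of code this affects (names are illustrative, not taken from the patch):

```swift
final class Node {
  var value: Int
  init(_ value: Int) { self.value = value }
}

func makeAndDrop() {
  // The array is never read and never escapes. After inlining of the
  // deallocation path, the buffer shows up in SIL as an alloc_ref with
  // tail-allocated elements, stores of the Node references, and a
  // destroyArray builtin. With this change the whole allocation can be
  // deleted, as long as the stored Node instances are still released.
  let nodes = [Node(1), Node(2), Node(3)]
  _ = nodes
}
```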
Changes from all commits: f12ac4c, 34ae1d9, 028afc0, 1655e22
@@ -36,6 +36,7 @@
 #include "swift/SIL/SILInstruction.h"
 #include "swift/SIL/SILModule.h"
 #include "swift/SIL/SILUndef.h"
+#include "swift/SIL/MemAccessUtils.h"
 #include "swift/SILOptimizer/Analysis/ArraySemantic.h"
 #include "swift/SILOptimizer/PassManager/Passes.h"
 #include "swift/SILOptimizer/PassManager/Transforms.h"
@@ -248,18 +249,112 @@ static bool canZapInstruction(SILInstruction *Inst, bool acceptRefCountInsts,
   return false;
 }
 
+/// Returns true if all stores in \p users store to the tail elements of
+/// \p allocRef, which are destroyed by the \p destroyArray builtin.
+static bool onlyStoresToTailObjects(BuiltinInst *destroyArray,
+                                    const UserList &users,
+                                    AllocRefInst *allocRef) {
+  // Get the number of destroyed elements.
+  auto *literal = dyn_cast<IntegerLiteralInst>(destroyArray->getArguments()[2]);
+  if (!literal || literal->getValue().getMinSignedBits() > 32)
+    return false;
+  int numDestroyed = literal->getValue().getSExtValue();
+
+  SILFunction *func = destroyArray->getFunction();
+  SILBasicBlock *storesBlock = nullptr;
+
+  // Check if the destroyArray destroys the tail elements of allocRef.
+  auto destroyPath = AccessPath::compute(destroyArray->getArguments()[1]);
+  if (destroyPath != AccessPath::forTailStorage(allocRef))
+    return false;
+
+  SmallVector<AccessPath, 32> pathsToCheck;
+
+  // Check all stores to the tail elements.
+  for (SILInstruction *user : users) {
+    auto *store = dyn_cast<StoreInst>(user);
+    if (!store)
+      continue;
+
+    assert(users.count(store->getSrc()->getDefiningInstruction()) == 0 &&
+           "Storing a use of an array (that would mean the array escapes)?");
+
+    // All stores must be in the same block. This ensures that the stores
+    // dominate the destroyArray (which may be in a different block).
+    if (storesBlock && store->getParent() != storesBlock)
+      return false;
+    storesBlock = store->getParent();
+
+    AccessPath storePath = AccessPath::compute(store->getDest());
+    if (!storePath.isValid())
+      return false;
+
+    // We don't care about trivial stores.
+    if (store->getSrc()->getType().isTrivial(*func))
+      continue;
+
+    // Check if it's a store to the tail elements.
+    if (!destroyPath.contains(storePath.withOffset(0)))
+      return false;
+
+    // Check if the store is within the range of the destroyed array. In OSSA
+    // we would not need this check. Otherwise it would be a memory lifetime
+    // failure.
+    if (storePath.getOffset() < 0 || storePath.getOffset() >= numDestroyed)
+      return false;

[review comment] Wow, I can't imagine how this check could be hit, but it doesn't hurt to check I guess.

[reply] Yeah, it's not something which is ever generated from Swift source, but in SIL it's legal.

+
+    pathsToCheck.push_back(storePath);
+  }
+
+  // In non-OSSA we have to check if two paths overlap, because we could end up
+  // over-releasing the stored objects.
+  // Group the paths by tail-element index, so that we only have to check within
+  // a tail-element group.
+  std::sort(pathsToCheck.begin(), pathsToCheck.end(),
+            [](AccessPath p1, AccessPath p2) {
+    return p1.getOffset() < p2.getOffset();
+  });
+  for (unsigned i = 0, n = pathsToCheck.size(); i < n; ++i) {
+    for (unsigned j = i + 1;
+         j < n && pathsToCheck[i].getOffset() == pathsToCheck[j].getOffset();
+         ++j) {
+      if (pathsToCheck[i].mayOverlap(pathsToCheck[j]))
+        return false;
+      // Limit the number of checks to avoid quadratic complexity.
+      if (j > i + 8)
+        return false;
+    }
+  }
+  return true;
+}
+
+static bool isDestroyArray(SILInstruction *inst) {
+  BuiltinInst *bi = dyn_cast<BuiltinInst>(inst);
+  return bi && bi->getBuiltinInfo().ID == BuiltinValueKind::DestroyArray;
+}
+
+/// Inserts releases of all stores in \p users.
+static void insertCompensatingReleases(SILInstruction *before,
+                                       const UserList &users) {
+  for (SILInstruction *user : users) {
+    if (auto *store = dyn_cast<StoreInst>(user)) {
+      createDecrementBefore(store->getSrc(), before);
+    }
+  }
+}
+
 /// Analyze the use graph of AllocRef for any uses that would prevent us from
 /// zapping it completely.
 static bool
-hasUnremovableUsers(SILInstruction *AllocRef, UserList *Users,
+hasUnremovableUsers(SILInstruction *allocation, UserList *Users,
                     bool acceptRefCountInsts, bool onlyAcceptTrivialStores) {
   SmallVector<SILInstruction *, 16> Worklist;
-  Worklist.push_back(AllocRef);
+  Worklist.push_back(allocation);
 
   LLVM_DEBUG(llvm::dbgs() << " Analyzing Use Graph.");
 
   SmallVector<RefElementAddrInst *, 8> refElementAddrs;
   bool deallocationMaybeInlined = false;
+  BuiltinInst *destroyArray = nullptr;
+  AllocRefInst *allocRef = dyn_cast<AllocRefInst>(allocation);
 
   while (!Worklist.empty()) {
     SILInstruction *I = Worklist.pop_back_val();
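A note on the overlap check in onlyStoresToTailObjects above: sorting the collected paths by tail-element offset groups potential conflicts next to each other, so a pairwise mayOverlap test is only needed between neighbours with equal offsets, and the `j > i + 8` cutoff bails out conservatively rather than going quadratic on pathological inputs. Below is a minimal self-contained sketch of the same pattern; the `Path` struct is a hypothetical stand-in for `AccessPath`, not the SIL API:

```swift
// Hypothetical stand-in for AccessPath: just enough state for the pattern.
struct Path {
  var offset: Int      // tail-element index
  var projBegin: Int   // simplified projection range inside the element
  var projEnd: Int

  func mayOverlap(_ other: Path) -> Bool {
    // Same element and intersecting projection ranges (simplified model).
    return offset == other.offset
        && projBegin < other.projEnd && other.projBegin < projEnd
  }
}

// Returns true if no two paths may address overlapping storage.
func noOverlaps(_ unsorted: [Path]) -> Bool {
  // Sort by offset so candidate overlaps become neighbours.
  let paths = unsorted.sorted { $0.offset < $1.offset }
  for i in paths.indices {
    var j = i + 1
    while j < paths.count && paths[i].offset == paths[j].offset {
      if paths[i].mayOverlap(paths[j]) { return false }
      if j > i + 8 { return false }  // give up conservatively instead of O(n^2)
      j += 1
    }
  }
  return true
}
```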
@@ -273,17 +368,19 @@ hasUnremovableUsers(SILInstruction *AllocRef, UserList *Users,
       continue;
     }
 
-    // If we can't zap this instruction... bail...
-    if (!canZapInstruction(I, acceptRefCountInsts, onlyAcceptTrivialStores)) {
-      LLVM_DEBUG(llvm::dbgs() << " Found instruction we can't zap...\n");
-      return true;
-    }
-
     if (auto *rea = dyn_cast<RefElementAddrInst>(I)) {
       if (!rea->getType().isTrivial(*rea->getFunction()))
         refElementAddrs.push_back(rea);
     } else if (isa<SetDeallocatingInst>(I)) {
       deallocationMaybeInlined = true;
+    } else if (allocRef && Users && isDestroyArray(I)) {
+      if (destroyArray)
+        return true;
+      destroyArray = cast<BuiltinInst>(I);
+    } else if (!canZapInstruction(I, acceptRefCountInsts,
+                                  onlyAcceptTrivialStores)) {
+      LLVM_DEBUG(llvm::dbgs() << " Found instruction we can't zap...\n");
+      return true;
     }
 
     // At this point, we can remove the instruction as long as all of its users
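For orientation, hasUnremovableUsers is a standard worklist traversal of the allocation's transitive use graph, and the new branch above simply makes destroyArray one more acceptable user (at most one of them). A simplified stand-alone sketch of that traversal shape, using a hypothetical `Inst` type rather than the SIL classes:

```swift
// Hypothetical simplified instruction node; the real code walks SILInstructions.
final class Inst: Hashable {
  var users: [Inst] = []
  var zappable = true

  static func == (a: Inst, b: Inst) -> Bool { a === b }
  func hash(into hasher: inout Hasher) { hasher.combine(ObjectIdentifier(self)) }
}

// Returns true if some transitive user prevents deleting the allocation.
func hasUnremovableUsers(_ allocation: Inst, collected: inout Set<Inst>) -> Bool {
  var worklist = [allocation]
  while let inst = worklist.popLast() {
    // A value can be reachable along several def-use chains; visit each once.
    if !collected.insert(inst).inserted { continue }
    if !inst.zappable { return true }
    worklist.append(contentsOf: inst.users)
  }
  return false
}
```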
@@ -309,6 +406,12 @@ hasUnremovableUsers(SILInstruction *AllocRef, UserList *Users,
     }
   }
 
+  // In OSSA, we don't have to do this check. We can always accept a
+  // destroyArray and insert the compensating destroys right at the store
+  // instructions.
+  if (destroyArray)
+    return !onlyStoresToTailObjects(destroyArray, *Users, allocRef);
+
   if (deallocationMaybeInlined) {
     // The alloc_ref is not destructed by a strong_release which is calling the
     // deallocator (destroying all stored properties).
@@ -767,6 +870,11 @@ bool DeadObjectElimination::processAllocRef(AllocRefInst *ARI) {
     return false;
   }
 
+  auto iter = std::find_if(UsersToRemove.begin(), UsersToRemove.end(),
+                           isDestroyArray);
+  if (iter != UsersToRemove.end())
+    insertCompensatingReleases(*iter, UsersToRemove);
+
   // Remove the AllocRef and all of its users.
   removeInstructions(
     ArrayRef<SILInstruction*>(UsersToRemove.begin(), UsersToRemove.end()));
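Finally, the compensating releases are what make the deletion sound: removing the array and its destroyArray must not skip the deinitialization of the stored objects, so a release of each stored value is inserted at the point where the deleted destroyArray would have run. A hypothetical Swift illustration of the observable behavior being preserved:

```swift
final class Tracked {
  let id: Int
  init(_ id: Int) { self.id = id }
  deinit { print("released \(id)") }
}

func dropTracked() {
  // The array buffer is dead and can be deleted, but both deinitializers
  // must still run; the inserted releases guarantee this.
  _ = [Tracked(1), Tracked(2)]
}

dropTracked()
```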