From 907e5f88164a20fe5e299a6e11d3e15a9dca8350 Mon Sep 17 00:00:00 2001
From: Graham Chapman
Date: Mon, 25 Nov 2019 11:06:49 -0500
Subject: [PATCH] Runtime compressed refs work

Update ForwardedHeader for runtime compressed refs.

Signed-off-by: Graham Chapman
---
 gc/structs/ForwardedHeader.cpp |  88 ++++--------
 gc/structs/ForwardedHeader.hpp | 253 ++++++++++++++++++++++-----------
 2 files changed, 200 insertions(+), 141 deletions(-)

diff --git a/gc/structs/ForwardedHeader.cpp b/gc/structs/ForwardedHeader.cpp
index 989b672da3..c1b9d2bc57 100644
--- a/gc/structs/ForwardedHeader.cpp
+++ b/gc/structs/ForwardedHeader.cpp
@@ -42,10 +42,13 @@
 void
 MM_ForwardedHeader::ForwardedHeaderDump(omrobjectptr_t destinationObjectPtr)
 {
 #if defined (OMR_GC_COMPRESSED_POINTERS)
-	fprintf(stderr, "MM_ForwardedHeader@%p[%p(%p):%x:%x] -> %p(%p)\n", this, _objectPtr, (uintptr_t*)(*_objectPtr), _preserved.slot, _preserved.overlap, destinationObjectPtr, (uintptr_t*)(*destinationObjectPtr));
-#else /* defined (OMR_GC_COMPRESSED_POINTERS) */
-	fprintf(stderr, "MM_ForwardedHeader@%p[%p(%p):%x] -> %p(%p)\n", this, _objectPtr, (uintptr_t*)(*_objectPtr), _preserved.slot, destinationObjectPtr, (uintptr_t*)(*destinationObjectPtr));
+	if (compressObjectReferences()) {
+		fprintf(stderr, "MM_ForwardedHeader@%p[%p(%p):%x:%x] -> %p(%p)\n", this, _objectPtr, (uintptr_t*)(*_objectPtr), getPreservedClassAndTags(), getPreservedOverlapNoCheck(), destinationObjectPtr, (uintptr_t*)(*destinationObjectPtr));
+	} else
 #endif /* defined (OMR_GC_COMPRESSED_POINTERS) */
+	{
+		fprintf(stderr, "MM_ForwardedHeader@%p[%p(%p):%x] -> %p(%p)\n", this, _objectPtr, (uintptr_t*)(*_objectPtr), getPreservedClassAndTags(), destinationObjectPtr, (uintptr_t*)(*destinationObjectPtr));
+	}
 }
 #endif /* defined(FORWARDEDHEADER_DEBUG) */
@@ -62,19 +65,11 @@
 omrobjectptr_t
 MM_ForwardedHeader::setForwardedObjectInternal(omrobjectptr_t destinationObjectPtr, uintptr_t forwardedTag)
 {
 	ForwardedHeaderAssert(!isForwardedPointer());
-	volatile MutableHeaderFields* objectHeader = (volatile MutableHeaderFields *)((fomrobject_t*)_objectPtr + _forwardingSlotOffset);
-	uintptr_t oldValue = *(uintptr_t *)&_preserved.slot;
+	uintptr_t oldValue = getPreserved();

 	/* The forwarded tag should be in the low bits of the pointer and at the same time be in the forwarding slot */
-#if defined (OMR_GC_COMPRESSED_POINTERS) && !defined(OMR_ENV_LITTLE_ENDIAN)
-	/* To get it for compressed big endian just swap halves of pointer */
-	uintptr_t newValue = (((uintptr_t)destinationObjectPtr | forwardedTag) << 32) | (((uintptr_t)destinationObjectPtr >> 32) & 0xffffffff);
-#else /* defined (OMR_GC_COMPRESSED_POINTERS) && !defined(OMR_ENV_LITTLE_ENDIAN) */
-	/* For little endian or not compressed write uintptr_t bytes straight */
-	uintptr_t newValue = (uintptr_t)destinationObjectPtr | forwardedTag;
-#endif /* defined (OMR_GC_COMPRESSED_POINTERS) && !defined(OMR_ENV_LITTLE_ENDIAN) */
-
-	if (MM_AtomicOperations::lockCompareExchange((volatile uintptr_t*)&objectHeader->slot, oldValue, newValue) != oldValue) {
+	uintptr_t newValue = flipValue((uintptr_t)destinationObjectPtr | forwardedTag);
+	if (MM_AtomicOperations::lockCompareExchange((volatile uintptr_t*)getObject(), oldValue, newValue) != oldValue) {
 		/* If we lost forwarding it, return where we are really forwarded. Another thread could have raced us to forward it at another location
 		 * or (in the Concurrent world) self-forward it.
 		 * In the latter case, we will return NULL */
 		MM_ForwardedHeader forwardedObject(_objectPtr, compressObjectReferences());
@@ -102,17 +97,7 @@ MM_ForwardedHeader::getForwardedObject()
 	uintptr_t forwardedTag = _forwardedTag;
 	if (isForwardedPointer()) {
 #endif
-#if defined (OMR_GC_COMPRESSED_POINTERS) && !defined(OMR_ENV_LITTLE_ENDIAN)
-		/* Compressed big endian - read two halves separately */
-		uint32_t hi = (uint32_t)_preserved.overlap;
-		uint32_t lo = (uint32_t)_preserved.slot & ~forwardedTag;
-		uintptr_t restoredForwardingSlotValue = (((uintptr_t)hi) <<32 ) | ((uintptr_t)lo);
-#else /* defined (OMR_GC_COMPRESSED_POINTERS) && !defined(OMR_ENV_LITTLE_ENDIAN) */
-		/* Little endian or not compressed - read all uintptr_t bytes at once */
-		uintptr_t restoredForwardingSlotValue = *(uintptr_t *)(&_preserved.slot) & ~forwardedTag;
-#endif /* defined (OMR_GC_COMPRESSED_POINTERS) && !defined(OMR_ENV_LITTLE_ENDIAN) */
-
-		forwardedObject = (omrobjectptr_t)(restoredForwardingSlotValue);
+		forwardedObject = (omrobjectptr_t)(getCanonicalPreserved() & ~forwardedTag);
 	}

 	return forwardedObject;
@@ -131,17 +116,7 @@ MM_ForwardedHeader::getNonStrictForwardedObject()
 	uintptr_t forwardedTag = _forwardedTag;
 	if (isForwardedPointer()) {
 #endif /* OMR_GC_CONCURRENT_SCAVENGER */
-#if defined (OMR_GC_COMPRESSED_POINTERS) && !defined(OMR_ENV_LITTLE_ENDIAN)
-		/* Compressed big endian - read two halves separately */
-		uint32_t hi = (uint32_t)_preserved.overlap;
-		uint32_t lo = (uint32_t)_preserved.slot & ~forwardedTag;
-		uintptr_t restoredForwardingSlotValue = (((uintptr_t)hi) <<32 ) | ((uintptr_t)lo);
-#else /* defined (OMR_GC_COMPRESSED_POINTERS) && !defined(OMR_ENV_LITTLE_ENDIAN) */
-		/* Little endian or not compressed - read all uintptr_t bytes at once */
-		uintptr_t restoredForwardingSlotValue = *(uintptr_t *)(&_preserved.slot) & ~forwardedTag;
-#endif /* defined (OMR_GC_COMPRESSED_POINTERS) && !defined(OMR_ENV_LITTLE_ENDIAN) */
-
-		forwardedObject = (omrobjectptr_t)(restoredForwardingSlotValue);
+		forwardedObject = (omrobjectptr_t)(getCanonicalPreserved() & ~forwardedTag);
 	}
 #if defined(OMR_GC_CONCURRENT_SCAVENGER)
 	else if (isSelfForwardedPointer()) {
@@ -156,8 +131,7 @@
 uintptr_t
 MM_ForwardedHeader::copySetup(omrobjectptr_t destinationObjectPtr, uintptr_t *remainingSizeToCopy)
 {
 #if defined(OMR_GC_CONCURRENT_SCAVENGER)
-	volatile MutableHeaderFields* objectHeader = (volatile MutableHeaderFields *)((fomrobject_t*)destinationObjectPtr + _forwardingSlotOffset);
-	uintptr_t copyOffset = sizeof(fomrobject_t) * (_forwardingSlotOffset + 1);
+	uintptr_t copyOffset = referenceSize();
 	ForwardedHeaderAssert(*remainingSizeToCopy >= copyOffset);
 	*remainingSizeToCopy = *remainingSizeToCopy - copyOffset;
@@ -173,7 +147,7 @@ MM_ForwardedHeader::copySetup(omrobjectptr_t destinationObjectPtr, uintptr_t *re
 	*remainingSizeToCopy -= alignmentResidue;

 	/* set the remaining length to copy */
-	objectHeader->slot = (fomrobject_t)(*remainingSizeToCopy | (0 << OUTSTANDING_COPIES_SHIFT)) | _beingCopiedTag;
+	writeClassSlot(destinationObjectPtr, (*remainingSizeToCopy | (0 << OUTSTANDING_COPIES_SHIFT)) | _beingCopiedTag);
 	/* Make sure that the destination object header is visible to other potential participating threads.
 	 * The atomic operation about to be executed as part of forwarding acts on the source object header,
 	 * hence it will not synchronize memory in the destination object header.
@@ -191,7 +165,7 @@ MM_ForwardedHeader::copySetup(omrobjectptr_t destinationObjectPtr, uintptr_t *re
 void
 MM_ForwardedHeader::copySection(omrobjectptr_t destinationObjectPtr, uintptr_t remainingSizeToCopy, uintptr_t sizeToCopy)
 {
-	uintptr_t copyOffset = sizeof(fomrobject_t) * (_forwardingSlotOffset + 1) + remainingSizeToCopy;
+	uintptr_t copyOffset = referenceSize() + remainingSizeToCopy;

 	void *dstStartAddress = (void *)((uintptr_t)destinationObjectPtr + copyOffset);
 	void *srcStartAddress = (void *)((uintptr_t)_objectPtr + copyOffset);
@@ -203,17 +177,16 @@ MM_ForwardedHeader::copySection(omrobjectptr_t destinationObjectPtr, uintptr_t r
 omrobjectptr_t
 MM_ForwardedHeader::setSelfForwardedObject()
 {
-	volatile MutableHeaderFields* objectHeader = (volatile MutableHeaderFields *)((fomrobject_t*)_objectPtr + _forwardingSlotOffset);
-	fomrobject_t oldValue = _preserved.slot;
+	uintptr_t oldValue = getPreservedClassAndTags();

-	fomrobject_t newValue = oldValue | _selfForwardedTag;
+	uintptr_t newValue = oldValue | _selfForwardedTag;

-	omrobjectptr_t forwardedObject = _objectPtr;
+	omrobjectptr_t forwardedObject = getObject();

-	if (oldValue != lockCompareExchangeObjectHeader(&objectHeader->slot, oldValue, newValue)) {
+	if (oldValue != lockCompareExchangeObjectHeader(forwardedObject, oldValue, newValue)) {
 		/* If we lost on self-forwarding, return where we are really forwarded. We could still be self-forwarded (another thread raced us) or
 		 * strictly forwarded (another thread successfully copied the object). Either way, getNonStrictForwardedObject() should return where we really are. */
-		MM_ForwardedHeader forwardedHeader(_objectPtr, compressObjectReferences());
+		MM_ForwardedHeader forwardedHeader(forwardedObject, compressObjectReferences());
 		forwardedObject = forwardedHeader.getNonStrictForwardedObject();
 	}
@@ -225,17 +198,16 @@
 void
 MM_ForwardedHeader::restoreSelfForwardedPointer()
 {
 	ForwardedHeaderAssert(isSelfForwardedPointer());
-	volatile MutableHeaderFields* objectHeader = (volatile MutableHeaderFields *)((fomrobject_t*)_objectPtr + _forwardingSlotOffset);
-	fomrobject_t oldValue = _preserved.slot;
+	uintptr_t oldValue = getPreservedClassAndTags();

-	fomrobject_t newValue = oldValue & ~_selfForwardedTag;
+	uintptr_t newValue = oldValue & ~_selfForwardedTag;

-	objectHeader->slot = newValue;
+	writeClassSlot(getObject(), newValue);
 }

 uintptr_t
-MM_ForwardedHeader::winObjectSectionToCopy(volatile fomrobject_t *copyProgressSlot, fomrobject_t oldValue, uintptr_t *remainingSizeToCopy, uintptr_t outstandingCopies)
+MM_ForwardedHeader::winObjectSectionToCopy(volatile omrobjectptr_t copyProgressSlot, uintptr_t oldValue, uintptr_t *remainingSizeToCopy, uintptr_t outstandingCopies)
 {
 	/* take small section (about 1%) to copy now to maximize parallelism */
 	uintptr_t sizeToCopy = SIZE_OF_SECTION_TO_COPY(*remainingSizeToCopy) & ~_copySizeAlignement;
@@ -268,14 +240,13 @@ MM_ForwardedHeader::wait(uintptr_t *spinCount) {
 void
 MM_ForwardedHeader::copyOrWaitOutline(omrobjectptr_t destinationObjectPtr)
 {
-	volatile MutableHeaderFields* objectHeader = (volatile MutableHeaderFields *)((fomrobject_t*)destinationObjectPtr + _forwardingSlotOffset);
 	uintptr_t spinCount = 10;
 	bool participatingInCopy = false;

 	while (true) {
 		uintptr_t sizeToCopy = 0, remainingSizeToCopy = 0;
 		do {
-			fomrobject_t oldValue = objectHeader->slot;
+			uintptr_t oldValue = readClassSlot(destinationObjectPtr);
 			if (0 == (oldValue & _beingCopiedTag)) {
 				/* the object is fully copied */
 				return;
@@ -286,8 +257,8 @@ MM_ForwardedHeader::copyOrWaitOutline(omrobjectptr_t destinationObjectPtr)
 			if (0 == remainingSizeToCopy) {
 				if (participatingInCopy) {
 					MM_AtomicOperations::storeSync();
-					fomrobject_t newValue = (fomrobject_t)(((outstandingCopies - 1) << OUTSTANDING_COPIES_SHIFT) | _beingCopiedTag);
-					if (oldValue != lockCompareExchangeObjectHeader(&objectHeader->slot, oldValue, newValue)) {
+					uintptr_t newValue = ((outstandingCopies - 1) << OUTSTANDING_COPIES_SHIFT) | _beingCopiedTag;
+					if (oldValue != lockCompareExchangeObjectHeader(destinationObjectPtr, oldValue, newValue)) {
 						continue;
 					}
 					participatingInCopy = false;
@@ -306,7 +277,7 @@ MM_ForwardedHeader::copyOrWaitOutline(omrobjectptr_t destinationObjectPtr)
 				}
 			}

-			sizeToCopy = winObjectSectionToCopy(&objectHeader->slot, oldValue, &remainingSizeToCopy, outstandingCopies);
+			sizeToCopy = winObjectSectionToCopy(destinationObjectPtr, oldValue, &remainingSizeToCopy, outstandingCopies);
 		} while (0 == sizeToCopy);

 		participatingInCopy = true;
@@ -320,13 +291,12 @@
 void
 MM_ForwardedHeader::copyOrWaitWinner(omrobjectptr_t destinationObjectPtr)
 {
 #if defined(OMR_GC_CONCURRENT_SCAVENGER)
-	volatile MutableHeaderFields* objectHeader = (volatile MutableHeaderFields *)((fomrobject_t*)destinationObjectPtr + _forwardingSlotOffset);
 	uintptr_t spinCount = 10;

 	while (true) {
 		uintptr_t remainingSizeToCopy = 0, sizeToCopy = 0;
 		do {
-			fomrobject_t oldValue = objectHeader->slot;
+			uintptr_t oldValue = readClassSlot(destinationObjectPtr);
 			remainingSizeToCopy = (uintptr_t)(oldValue & _remainingSizeMask);
 			uintptr_t outstandingCopies = ((uintptr_t)oldValue & _copySizeAlignement) >> OUTSTANDING_COPIES_SHIFT;
@@ -339,7 +309,7 @@ MM_ForwardedHeader::copyOrWaitWinner(omrobjectptr_t destinationObjectPtr)
 			}
 		}

-			sizeToCopy = winObjectSectionToCopy(&objectHeader->slot, oldValue, &remainingSizeToCopy, outstandingCopies);
+			sizeToCopy = winObjectSectionToCopy(destinationObjectPtr, oldValue, &remainingSizeToCopy, outstandingCopies);
 		} while (0 == sizeToCopy);

 		copySection(destinationObjectPtr, remainingSizeToCopy, sizeToCopy);
diff --git a/gc/structs/ForwardedHeader.hpp b/gc/structs/ForwardedHeader.hpp
index 5d8786f764..8646e2a655 100644
--- a/gc/structs/ForwardedHeader.hpp
+++ b/gc/structs/ForwardedHeader.hpp
@@ -79,16 +79,7 @@ class MM_ForwardedHeader
 public:
 protected:
 private:
-	/*
-	 * First slot in preserved header fields is always aligned to uintptr_t (compressed or not)
-	 * so forwarded pointer is stored in uintptr_t word at forwarding slot address and overlaps
-	 * next slot for compressed. So, in any case this reads all preserved header fields:
-	 *
-	 * *(uintptr_t *)&a.slot = *((uintptr_t *)&b.slot);
-	 *
-	 * for MutableHeaderFields a, b.
-	 */
-	struct MutableHeaderFields {
+struct MutableHeaderFields {
 	/* first slot must be always aligned as for an object slot */
 	fomrobject_t slot;
@@ -96,22 +87,20 @@ class MM_ForwardedHeader
 #if defined (OMR_GC_COMPRESSED_POINTERS)
 	/* this field must be here to reserve space if slots are 4 bytes long (extend to 8 bytes starting from &MutableHeaderFields.clazz) */
 	uint32_t overlap;
 #endif /* defined (OMR_GC_COMPRESSED_POINTERS) */
-	};
-
+};

 	omrobjectptr_t _objectPtr; /**< the object on which to act */
-	MutableHeaderFields _preserved; /**< a backup copy of the header fields which may be modified by this class */
-	const uintptr_t _forwardingSlotOffset; /**< fomrobject_t offset from _objectPtr to fomrobject_t slot that will hold the forwarding pointer */
+	uintptr_t _preserved; /**< a backup copy of the header fields which may be modified by this class */
 #if defined(OMR_GC_COMPRESSED_POINTERS) && defined(OMR_GC_FULL_POINTERS)
 	bool const _compressObjectReferences;
 #endif /* defined(OMR_GC_COMPRESSED_POINTERS) && defined(OMR_GC_FULL_POINTERS) */

 	static const uintptr_t _forwardedTag = OMR_FORWARDED_TAG; /**< bit mask used to mark forwarding slot value as forwarding pointer */
 #if defined(OMR_GC_CONCURRENT_SCAVENGER)
-	static const fomrobject_t _selfForwardedTag = (fomrobject_t)(_forwardedTag | OMR_SELF_FORWARDED_TAG);
-	static const fomrobject_t _beingCopiedHint = (fomrobject_t)OMR_BEING_COPIED_HINT; /**< used in source object f/w pointer to hint that object might still be being copied */
-	static const fomrobject_t _beingCopiedTag = (fomrobject_t)OMR_BEING_COPIED_TAG; /**< used in destination object, but using the same bit as _forwardedTag in source object */
-	static const fomrobject_t _remainingSizeMask = (fomrobject_t)REMAINING_SIZE_MASK;
-	static const fomrobject_t _copyProgressInfoMask = (fomrobject_t)(_remainingSizeMask | OUTSTANDING_COPIES_MASK);
+	static const uintptr_t _selfForwardedTag = (uintptr_t)(_forwardedTag | OMR_SELF_FORWARDED_TAG);
+	static const uintptr_t _beingCopiedHint = (uintptr_t)OMR_BEING_COPIED_HINT; /**< used in source object f/w pointer to hint that object might still be being copied */
+	static const uintptr_t _beingCopiedTag = (uintptr_t)OMR_BEING_COPIED_TAG; /**< used in destination object, but using the same bit as _forwardedTag in source object */
+	static const uintptr_t _remainingSizeMask = (uintptr_t)REMAINING_SIZE_MASK;
+	static const uintptr_t _copyProgressInfoMask = (uintptr_t)(_remainingSizeMask | OUTSTANDING_COPIES_MASK);
 	static const uintptr_t _copySizeAlignement = (uintptr_t)SIZE_ALIGNMENT;
 	static const uintptr_t _minIncrement = (131072 & _remainingSizeMask); /**< min size of copy section; does not have to be a power of 2, but it has to be aligned with _copySizeAlignement */
 #endif /* OMR_GC_CONCURRENT_SCAVENGER */
@@ -120,16 +109,120 @@ class MM_ForwardedHeader
 	 * Function members
 	 */
 private:
-	MMINLINE_DEBUG static fomrobject_t
-	lockCompareExchangeObjectHeader(volatile fomrobject_t *address, fomrobject_t oldValue, fomrobject_t newValue)
+	/**
+	 * Return the size of an object-to-object reference
+	 */
+	MMINLINE uintptr_t
+	referenceSize()
 	{
+		uintptr_t size = sizeof(uintptr_t);
+		if (compressObjectReferences()) {
+			size = sizeof(uint32_t);
+		}
+		return size;
+	}
+
+	/**
+	 * Fetch the class portion of the preserved data (with any tags).
+	 *
+	 * @return the class and tags
+	 */
+	MMINLINE uintptr_t
+	getPreservedClassAndTags()
+	{
+		uintptr_t result = _preserved;
 #if defined(OMR_GC_COMPRESSED_POINTERS)
-	return MM_AtomicOperations::lockCompareExchangeU32((volatile uint32_t*)address, oldValue, newValue);
-#else /* OMR_GC_COMPRESSED_POINTERS */
-	return MM_AtomicOperations::lockCompareExchange((volatile uintptr_t*)address, oldValue, newValue);
-#endif /* OMR_GC_COMPRESSED_POINTERS */
+		if (compressObjectReferences()) {
+#if defined(OMR_ENV_LITTLE_ENDIAN)
+			result &= 0xFFFFFFFF;
+#else /* defined(OMR_ENV_LITTLE_ENDIAN) */
+			result >>= 32;
+#endif /* defined(OMR_ENV_LITTLE_ENDIAN) */
+		}
+#endif /* defined(OMR_GC_COMPRESSED_POINTERS) */
+		return result;
 	}
-
+
+	/**
+	 * Fetch the complete preserved value in memory order.
+	 *
+	 * @return the preserved value
+	 */
+	MMINLINE uintptr_t
+	getPreserved()
+	{
+		return _preserved;
+	}
+
+	/**
+	 * Fetch the complete preserved value, endian flipping if necessary to ensure
+	 * any tag bits appear in the low-order bits.
+	 *
+	 * @return the preserved value in canonical format
+	 */
+	MMINLINE uintptr_t
+	getCanonicalPreserved()
+	{
+		return flipValue(getPreserved());
+	}
+
+	/**
+	 * Endian flip a 64-bit value if running on compressed big endian.
+	 *
+	 * @return the flipped value on compressed big endian, the input value otherwise
+	 */
+	MMINLINE uintptr_t
+	flipValue(uintptr_t value)
+	{
+#if defined(OMR_GC_COMPRESSED_POINTERS) && !defined(OMR_ENV_LITTLE_ENDIAN)
+		if (compressObjectReferences()) {
+			value = (value >> 32) | (value << 32);
+		}
+#endif /* defined(OMR_GC_COMPRESSED_POINTERS) && !defined(OMR_ENV_LITTLE_ENDIAN) */
+		return value;
+	}
+
+	MMINLINE_DEBUG uintptr_t
+	lockCompareExchangeObjectHeader(volatile omrobjectptr_t address, uintptr_t oldValue, uintptr_t newValue)
+	{
+		uintptr_t result = 0;
+		if (compressObjectReferences()) {
+			result = MM_AtomicOperations::lockCompareExchangeU32((volatile uint32_t*)address, (uint32_t)oldValue, (uint32_t)newValue);
+		} else {
+			result = MM_AtomicOperations::lockCompareExchange((volatile uintptr_t*)address, oldValue, newValue);
+		}
+		return result;
+	}
+
+	/**
+	 * Write the class slot of an object pointer
+	 */
+	MMINLINE void
+	writeClassSlot(omrobjectptr_t destinationObjectPtr, uintptr_t newValue)
+	{
+		if (compressObjectReferences()) {
+			*(uint32_t*)destinationObjectPtr = (uint32_t)newValue;
+		} else {
+			*(uintptr_t*)destinationObjectPtr = newValue;
+		}
+	}
+
+	/**
+	 * Read the class slot from an object pointer
+	 * @return the slot value, zero-extended to uintptr_t (for compressed refs)
+	 */
+	MMINLINE uintptr_t
+	readClassSlot(omrobjectptr_t destinationObjectPtr)
+	{
+		uintptr_t value = 0;
+		if (compressObjectReferences()) {
+			value = *(uint32_t*)destinationObjectPtr;
+		} else {
+			value = *(uintptr_t*)destinationObjectPtr;
+		}
+		return value;
+	}
+
 	/**
 	 * Atomically try to win forwarding.
 	 * It is the internal implementation of the public setForwardedObject().
 	 */
@@ -139,7 +232,7 @@ class MM_ForwardedHeader
 	/**
 	 * Try to win a section of large object that is still being copied
 	 */
-	static uintptr_t winObjectSectionToCopy(volatile fomrobject_t *copyProgressSlot, fomrobject_t oldValue, uintptr_t *remainingSizeToCopy, uintptr_t outstandingCopies);
+	uintptr_t winObjectSectionToCopy(volatile omrobjectptr_t copyProgressSlot, uintptr_t oldValue, uintptr_t *remainingSizeToCopy, uintptr_t outstandingCopies);

 	/**
 	 * Just spin (or pause) for a certain number of cycles
@@ -153,7 +246,7 @@ class MM_ForwardedHeader
 	isBeingCopied()
 	{
 		/* strictly forwarded object with _beingCopiedHint set */
-		return (_beingCopiedHint | _forwardedTag) == (_preserved.slot & (_beingCopiedHint | _selfForwardedTag));
+		return (_beingCopiedHint | _forwardedTag) == (getPreservedClassAndTags() & (_beingCopiedHint | _selfForwardedTag));
 	}

 	/**
@@ -231,7 +324,7 @@ class MM_ForwardedHeader
 	MMINLINE bool
 	isForwardedPointer()
 	{
-		return _forwardedTag == ((uintptr_t)_preserved.slot & _forwardedTag);
+		return _forwardedTag == (getPreservedClassAndTags() & _forwardedTag);
 	}

 #if defined(OMR_GC_CONCURRENT_SCAVENGER)
@@ -243,14 +336,14 @@ class MM_ForwardedHeader
 	MMINLINE bool
 	isSelfForwardedPointer()
 	{
-		return _selfForwardedTag == (_preserved.slot & _selfForwardedTag);
+		return _selfForwardedTag == (getPreservedClassAndTags() & _selfForwardedTag);
 	}

 	MMINLINE bool
 	isStrictlyForwardedPointer()
 	{
 		/* only _forwardedTag set ('self forwarded bit' reset) */
-		return _forwardedTag == (_preserved.slot & _selfForwardedTag);
+		return _forwardedTag == (getPreservedClassAndTags() & _selfForwardedTag);
 	}

 	omrobjectptr_t setSelfForwardedObject();
@@ -258,19 +351,21 @@ class MM_ForwardedHeader
 	void restoreSelfForwardedPointer();

 	/**
-	 * Initial step for destination object fixup - restore object flags and overlap, while still maintaining progess info and being copied bit.
+	 * Initial step for destination object fixup - restore object flags and overlap, while still maintaining progress info and being copied bit.
 	 */
 	MMINLINE void
 	commenceFixup(omrobjectptr_t destinationObjectPtr)
 	{
-		MutableHeaderFields* newHeader = (MutableHeaderFields *)((fomrobject_t *)destinationObjectPtr + _forwardingSlotOffset);
-
-		/* copy preserved flags, and keep the rest (which should be all 0s) */
-		newHeader->slot = (_preserved.slot & ~_copyProgressInfoMask) | _beingCopiedTag;
-
-#if defined (OMR_GC_COMPRESSED_POINTERS)
-		newHeader->overlap = _preserved.overlap;
-#endif /* defined (OMR_GC_COMPRESSED_POINTERS) */
+		uintptr_t mask = ~_copyProgressInfoMask;
+#if defined(OMR_GC_COMPRESSED_POINTERS)
+		if (compressObjectReferences()) {
+			/* _copyProgressInfoMask has the high 32 bits set, so they will be 0 in mask.
+			 * Update mask to not remove the overlap.
+			 */
+			mask |= 0xFFFFFFFF00000000;
+		}
+#endif /* defined(OMR_GC_COMPRESSED_POINTERS) */
+		*(uintptr_t*)destinationObjectPtr = flipValue((getCanonicalPreserved() & mask) | _beingCopiedTag);
 	}

 	/**
@@ -280,17 +375,16 @@ class MM_ForwardedHeader
 	MMINLINE void
 	commitFixup(omrobjectptr_t destinationObjectPtr)
 	{
-		MutableHeaderFields* newHeader = (MutableHeaderFields *)((fomrobject_t *)destinationObjectPtr + _forwardingSlotOffset);
-
 		/* before we announce this copy of the object is available, do a write sync */
 		MM_AtomicOperations::storeSync();
-
+
 		/* get flags */
-		newHeader->slot = (_preserved.slot & _copyProgressInfoMask) | (newHeader->slot & ~_copyProgressInfoMask & ~(fomrobject_t)_beingCopiedTag);
-
+		uintptr_t newValue = (getPreservedClassAndTags() & _copyProgressInfoMask) | (readClassSlot(destinationObjectPtr) & ~(_copyProgressInfoMask | _beingCopiedTag));
+		writeClassSlot(destinationObjectPtr, newValue);
+
 		/* remove the hint in the source object */
-		volatile MutableHeaderFields* objectHeader = (volatile MutableHeaderFields *)((fomrobject_t*)_objectPtr + _forwardingSlotOffset);
-		objectHeader->slot &= ~_beingCopiedHint;
+		newValue = readClassSlot(getObject()) & ~_beingCopiedHint;
+		writeClassSlot(getObject(), newValue);
 	}

 	/**
@@ -341,14 +435,32 @@ class MM_ForwardedHeader
 	 *
 	 * @see isForwardedPointer()
 	 */
-	MMINLINE fomrobject_t
+	MMINLINE uintptr_t
 	getPreservedSlot()
 	{
 		ForwardedHeaderAssert(!isForwardedPointer());
-		return _preserved.slot;
+		return getPreservedClassAndTags();
 	}

 #if defined (OMR_GC_COMPRESSED_POINTERS)
+	/**
+	 * Fetch the overlap portion of the preserved data (with any tags).
+	 *
+	 * @return the overlap and tags
+	 */
+	MMINLINE uint32_t
+	getPreservedOverlapNoCheck()
+	{
+		ForwardedHeaderAssert(compressObjectReferences());
+		uintptr_t result = _preserved;
+#if defined(OMR_ENV_LITTLE_ENDIAN)
+		result >>= 32;
+#else /* defined(OMR_ENV_LITTLE_ENDIAN) */
+		result &= 0xFFFFFFFF;
+#endif /* defined(OMR_ENV_LITTLE_ENDIAN) */
+		return (uint32_t)result;
+	}
+
 	/**
 	 * This method will assert if the object has been forwarded. Use isForwardedPointer() to test before calling.
 	 *
@@ -360,7 +472,7 @@ class MM_ForwardedHeader
 	getPreservedOverlap()
 	{
 		ForwardedHeaderAssert(!isForwardedPointer());
-		return _preserved.overlap;
+		return getPreservedOverlapNoCheck();
 	}

 	/**
@@ -372,7 +484,9 @@ class MM_ForwardedHeader
 	MMINLINE void
 	restoreDestroyedOverlap(uint32_t restoredValue)
 	{
-		((MutableHeaderFields *)((fomrobject_t *)getObject() + _forwardingSlotOffset))->overlap = restoredValue;
+		ForwardedHeaderAssert(compressObjectReferences());
+		uint32_t *header = (uint32_t*)getObject();
+		header[1] = restoredValue;
 	}

 	/**
@@ -381,7 +495,8 @@ class MM_ForwardedHeader
 	MMINLINE void
 	restoreDestroyedOverlap()
 	{
-		restoreDestroyedOverlap(((MutableHeaderFields *)((fomrobject_t *)getForwardedObject() + _forwardingSlotOffset))->overlap);
+		uint32_t *header = (uint32_t*)getForwardedObject();
+		restoreDestroyedOverlap(header[1]);
 	}
 #endif /* defined(OMR_GC_COMPRESSED_POINTERS) */

@@ -397,11 +512,7 @@ class MM_ForwardedHeader
 	MMINLINE void
 	fixupForwardedObject(omrobjectptr_t destinationObjectPtr)
 	{
-		MutableHeaderFields* newHeader = (MutableHeaderFields *)((fomrobject_t *)destinationObjectPtr + _forwardingSlotOffset);
-		newHeader->slot = _preserved.slot;
-#if defined (OMR_GC_COMPRESSED_POINTERS)
-		newHeader->overlap = _preserved.overlap;
-#endif /* defined (OMR_GC_COMPRESSED_POINTERS) */
+		*(uintptr_t*)destinationObjectPtr = getPreserved();
 	}

 	/**
@@ -415,7 +526,7 @@ class MM_ForwardedHeader
 	MMINLINE bool
 	isReverseForwardedPointer()
 	{
-		return J9_GC_MULTI_SLOT_HOLE == ((uintptr_t)getPreservedSlot() & J9_GC_OBJ_HEAP_HOLE_MASK);
+		return J9_GC_MULTI_SLOT_HOLE == (getPreservedSlot() & J9_GC_OBJ_HEAP_HOLE_MASK);
 	}

 	/**
@@ -430,11 +541,7 @@ class MM_ForwardedHeader
 	{
 		ForwardedHeaderAssert(isReverseForwardedPointer());
 		MM_HeapLinkedFreeHeader* freeHeader = MM_HeapLinkedFreeHeader::getHeapLinkedFreeHeader(_objectPtr);
-#if defined(OMR_GC_COMPRESSED_POINTERS)
-		return (omrobjectptr_t) freeHeader->getNext(true);
-#else /* OMR_GC_COMPRESSED_POINTERS */
-		return (omrobjectptr_t) freeHeader->getNext(false);
-#endif /* OMR_GC_COMPRESSED_POINTERS */
+		return (omrobjectptr_t) freeHeader->getNext(compressObjectReferences());
 	}

 	/**
@@ -445,29 +552,11 @@ class MM_ForwardedHeader
 	 */
 	MM_ForwardedHeader(omrobjectptr_t objectPtr, bool compressed)
 	: _objectPtr(objectPtr)
-	, _forwardingSlotOffset(0)
+	, _preserved(*(volatile uintptr_t *)_objectPtr)
 #if defined(OMR_GC_COMPRESSED_POINTERS) && defined(OMR_GC_FULL_POINTERS)
 	, _compressObjectReferences(compressed)
 #endif /* defined(OMR_GC_COMPRESSED_POINTERS) && defined(OMR_GC_FULL_POINTERS) */
 	{
-		/* TODO: Fix the constraint that the object header/forwarding slot offset must be zero. */
-		volatile MutableHeaderFields* originalHeader = (volatile MutableHeaderFields *)((fomrobject_t*)_objectPtr + _forwardingSlotOffset);
-		*(uintptr_t *)&_preserved.slot = *((uintptr_t *)&originalHeader->slot);
-	}
-
-	/**
-	 * TODO: Remove once matching openj9 change is made.
-	 *
-	 * @param[in] objectPtr pointer to the object, which may or may not have been forwarded
-	 * @param[in] forwardingSlotOffset fomrobject_t offset to uintptr_t size slot that will hold the forwarding pointer
-	 */
-	MM_ForwardedHeader(omrobjectptr_t objectPtr)
-	: _objectPtr(objectPtr)
-	, _forwardingSlotOffset(0)
-	{
-		/* TODO: Fix the constraint that the object header/forwarding slot offset must be zero.
-		 */
-		volatile MutableHeaderFields* originalHeader = (volatile MutableHeaderFields *)((fomrobject_t*)_objectPtr + _forwardingSlotOffset);
-		*(uintptr_t *)&_preserved.slot = *((uintptr_t *)&originalHeader->slot);
-	}

 protected:
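
Reviewer note on the endianness handling in this patch: with compressed references the class slot is only 32 bits wide, so reading the header as a full uintptr_t on a big-endian target leaves the class slot, and any tag bits, in the high word. flipValue() swaps the two halves so that tag bits always appear in the low-order bits of the canonical value. Below is a minimal stand-alone sketch of that invariant. It is not OMR code: the two configuration flags, the tag value, and the fake destination address are assumptions for illustration, and a 64-bit uintptr_t is presumed.

#include <cassert>
#include <cstdint>

/* Not OMR code: hypothetical stand-ins for the build/runtime configuration. */
static const bool compressed = true;    /* models compressObjectReferences() returning true */
static const bool littleEndian = false; /* models a big-endian target (OMR_ENV_LITTLE_ENDIAN not defined) */
static const uintptr_t forwardedTag = 0x1; /* placeholder tag bit, not the real OMR_FORWARDED_TAG value */

/* Swap the 32-bit halves on compressed big endian, as flipValue() does. */
static uintptr_t flipValue(uintptr_t value)
{
	if (compressed && !littleEndian) {
		value = (value >> 32) | (value << 32);
	}
	return value;
}

/* Extract the class slot plus tags from a raw header read, as
 * getPreservedClassAndTags() does: low word on little endian, high word
 * on big endian, the whole value for full-width references. */
static uintptr_t classAndTags(uintptr_t preserved)
{
	if (compressed) {
		preserved = littleEndian ? (preserved & 0xFFFFFFFF) : (preserved >> 32);
	}
	return preserved;
}

int main()
{
	uintptr_t destination = 0x12345670; /* fake destination address, tag bits clear */

	/* Forwarding stores flipValue(destination | tag), so the tag always
	 * lands in the 32-bit class slot, whatever the byte order... */
	uintptr_t headerWord = flipValue(destination | forwardedTag);
	assert(forwardedTag == (classAndTags(headerWord) & forwardedTag));

	/* ...and canonicalizing the preserved value recovers the destination. */
	assert(destination == (flipValue(headerWord) & ~forwardedTag));
	return 0;
}

The same swap explains setForwardedObjectInternal() above: storing flipValue(destination | tag) over the whole header makes the tag visible in the 32-bit class slot regardless of byte order, while getCanonicalPreserved() & ~tag recovers the destination pointer.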
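A second reviewer note, on the runtime width selection: the patch replaces compile-time fomrobject_t accesses with helpers (readClassSlot(), writeClassSlot(), lockCompareExchangeObjectHeader()) that choose 32-bit or full-width loads, stores, and compare-and-swaps at run time, always zero-extending reads to uintptr_t. A hedged stand-alone sketch of that pattern follows; the GCC/Clang __sync builtin stands in for MM_AtomicOperations, and the configuration function and fake header are assumptions, not the patch's code.

#include <cstdint>
#include <cstdio>

/* Assumed runtime query; true models a compressed-references build. */
static bool compressObjectReferences() { return true; }

/* Read the class slot at the start of an object header, zero-extending
 * the 32-bit slot in the compressed case (cf. readClassSlot()). */
static uintptr_t readClassSlot(volatile void *header)
{
	if (compressObjectReferences()) {
		return *(volatile uint32_t *)header;
	}
	return *(volatile uintptr_t *)header;
}

/* Write the class slot with a store of matching width (cf. writeClassSlot()). */
static void writeClassSlot(volatile void *header, uintptr_t value)
{
	if (compressObjectReferences()) {
		*(volatile uint32_t *)header = (uint32_t)value;
	} else {
		*(volatile uintptr_t *)header = value;
	}
}

/* Width-matched compare-and-swap returning the witnessed old value, using
 * the GCC/Clang __sync builtin in place of MM_AtomicOperations'
 * lockCompareExchange/lockCompareExchangeU32. */
static uintptr_t casClassSlot(volatile void *header, uintptr_t oldValue, uintptr_t newValue)
{
	if (compressObjectReferences()) {
		return __sync_val_compare_and_swap((volatile uint32_t *)header, (uint32_t)oldValue, (uint32_t)newValue);
	}
	return __sync_val_compare_and_swap((volatile uintptr_t *)header, oldValue, newValue);
}

int main()
{
	uint64_t header = 0; /* fake object header; on little endian the class slot is the low 32 bits */
	writeClassSlot(&header, 0x1000);
	uintptr_t seen = casClassSlot(&header, 0x1000, 0x2000);
	printf("witnessed %#lx, slot now %#lx\n", (unsigned long)seen, (unsigned long)readClassSlot(&header));
	return 0;
}

Zero-extending the 32-bit read is what lets callers compare these values directly against getPreservedClassAndTags() results and the uintptr_t tag constants.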