    // Values are the FAST_ flags above.
    uintptr_t bits;

private:
    bool getBit(uintptr_t bit) const {
        return bits & bit;
    }
    // Atomically set the bits in `set` and clear the bits in `clear`.
    // set and clear must not overlap.
    void setAndClearBits(uintptr_t set, uintptr_t clear) {
        ASSERT((set & clear) == 0);
        uintptr_t newBits, oldBits = LoadExclusive(&bits);
        do {
            newBits = (oldBits | set) & ~clear;
        } while (slowpath(!StoreReleaseExclusive(&bits, &oldBits, newBits)));
    }
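LoadExclusive and StoreReleaseExclusive are runtime-internal primitives (exclusive load/store-release instructions on arm64, a weak compare-exchange elsewhere), so the loop above is a classic lock-free read-modify-write: recompute newBits from the freshest snapshot until the store succeeds. A minimal portable sketch of the same loop, using std::atomic in place of the runtime's wrappers:

// Portable sketch of the retry loop above using the standard library.
// compare_exchange_weak is the std::atomic analogue of the runtime's
// LoadExclusive/StoreReleaseExclusive pair; `bits` here is a free
// variable for the demo, not the real class_data_bits_t member.
#include <atomic>
#include <cassert>
#include <cstdint>

std::atomic<uintptr_t> bits{0};

void setAndClearBitsPortable(uintptr_t set, uintptr_t clear) {
    assert((set & clear) == 0);
    uintptr_t oldBits = bits.load(std::memory_order_relaxed);
    uintptr_t newBits;
    do {
        // Recompute from the freshest snapshot; on CAS failure,
        // compare_exchange_weak reloads oldBits for us.
        newBits = (oldBits | set) & ~clear;
    } while (!bits.compare_exchange_weak(oldBits, newBits,
                                         std::memory_order_release,
                                         std::memory_order_relaxed));
}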
public:
    class_rw_t* data() const {
        return (class_rw_t *)(bits & FAST_DATA_MASK);
    }

    void setData(class_rw_t *newData) {
        ASSERT(!data() || (newData->flags & (RW_REALIZING | RW_FUTURE)));

        // Set during realization or construction only. No locking needed.
        // Use a store-release fence because there may be concurrent
        // readers of data and data's contents.
        uintptr_t newBits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
        atomic_thread_fence(memory_order_release);
        bits = newBits;
    }
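data() and setData() work because class_rw_t allocations are sufficiently aligned that the low bits of the pointer are always zero, leaving them free to hold the FAST_ flags; FAST_DATA_MASK strips those flags back off. A toy illustration of that packing, with an illustrative mask and payload type rather than the real FAST_DATA_MASK and class_rw_t:

// Toy illustration of pointer-plus-flags packing. The masks and the
// Payload type are illustrative stand-ins, not the runtime's values.
#include <cstdint>
#include <cstdio>

struct alignas(8) Payload { long x; };   // stand-in for class_rw_t

int main() {
    const uintptr_t kFlagBit  = 0x1;              // one low flag bit
    const uintptr_t kDataMask = ~uintptr_t(0x7);  // stand-in for FAST_DATA_MASK

    Payload p{42};
    uintptr_t bits = (uintptr_t)&p | kFlagBit;    // pack pointer + flag

    Payload *data = (Payload *)(bits & kDataMask); // like data()
    bool flag     = bits & kFlagBit;               // like getBit(kFlagBit)
    printf("%ld %d\n", data->x, (int)flag);        // prints: 42 1
}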
    // Get the class's ro data, even in the presence of concurrent realization.
    // fixme this isn't really safe without a compiler barrier at least
    // and probably a memory barrier when realizeClass changes the data field
    const class_ro_t *safe_ro() const {
        class_rw_t *maybe_rw = data();
        if (maybe_rw->flags & RW_REALIZED) {
            // maybe_rw is rw
            return maybe_rw->ro();
        } else {
            // maybe_rw is actually ro
            return (class_ro_t *)maybe_rw;
        }
    }
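safe_ro() relies on a deliberate type pun: before realization, the data field still points at the compiler-emitted class_ro_t, and both structs begin with a uint32_t flags word in which the RW_REALIZED bit is only ever set for a genuine class_rw_t, so reading flags through the class_rw_t type is safe either way. A stripped-down sketch of that discrimination, with illustrative names:

// Sketch of the pun safe_ro() depends on. RO, RW, and kRealized are
// illustrative names; the key property is that both structs start with
// a uint32_t flags word and bit 31 is never set in a real RO's flags.
#include <cstdint>

struct RO { uint32_t flags; /* compiler-emitted, read-only ... */ };
struct RW { uint32_t flags; const RO *ro; };

const uint32_t kRealized = 1u << 31;   // stand-in for RW_REALIZED

const RO *safeRO(void *dataField) {
    RW *maybeRW = (RW *)dataField;
    if (maybeRW->flags & kRealized) {
        return maybeRW->ro;            // realized: dataField really is an RW
    }
    return (const RO *)dataField;      // unrealized: still the original RO
}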
#if SUPPORT_INDEXED_ISA
    void setClassArrayIndex(unsigned Idx) {
        // 0 is unused as then we can rely on zero-initialisation from calloc.
        ASSERT(Idx > 0);
        data()->index = Idx;
    }
#else
    void setClassArrayIndex(__unused unsigned Idx) {
    }
#endif
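On targets where SUPPORT_INDEXED_ISA is set (watchOS-class hardware), the isa field stores a small index into a global class table instead of a raw class pointer, which is why class_rw_t carries an index member there. A toy sketch of the table-plus-index scheme; the table and helper names are illustrative, not the runtime's:

// Toy sketch of indexed isa: the class is identified by a small table
// index rather than a pointer. Index 0 is reserved so zero-filled
// memory from calloc reads as "no class yet". Names are hypothetical.
#include <cstdint>

struct Cls { const char *name; };

Cls *gClassTable[1024];          // stand-in for the runtime's class table
unsigned gNextIndex = 1;         // 0 stays unused

unsigned registerClass(Cls *c) {
    unsigned idx = gNextIndex++;
    gClassTable[idx] = c;
    return idx;                  // what setClassArrayIndex would store
}

Cls *classForIndex(unsigned idx) {
    return idx ? gClassTable[idx] : nullptr;  // 0 == uninitialized
}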
    // fixme remove this once the Swift runtime uses the stable bits
    bool isSwiftStable_ButAllowLegacyForNow() {
        return isAnySwift();
    }
    _objc_swiftMetadataInitializer swiftMetadataInitializer() {
        // This function is called on un-realized classes without
        // holding any locks.
        // Beware of races with other realizers.
        return safe_ro()->swiftMetadataInitializer();
    }
};
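For context: class_data_bits_t is the bits member at the tail of objc_class, and objc_class::data() simply forwards to bits.data(). A condensed sketch of that relationship, assuming the surrounding objc4 types; the real declaration has many more members and methods:

// Condensed sketch of how objc_class embeds class_data_bits_t.
struct objc_class : objc_object {
    Class superclass;
    cache_t cache;              // method cache and occupancy bookkeeping
    class_data_bits_t bits;     // class_rw_t * plus custom rr/alloc flags

    class_rw_t *data() const {
        return bits.data();     // mask off the flags, return the pointer
    }
};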
struct class_rw_t {
    // Be warned that Symbolication knows the layout of this structure.
    uint32_t flags;
    uint16_t witness;
#if SUPPORT_INDEXED_ISA
    uint16_t index;
#endif

    explicit_atomic<uintptr_t> ro_or_rw_ext;
    Class firstSubclass;
    Class nextSiblingClass;

    // code omitted ...

    const method_array_t methods() const {
        auto v = get_ro_or_rwe();
        if (v.is<class_rw_ext_t *>()) {
            return v.get<class_rw_ext_t *>(&ro_or_rw_ext)->methods;
        } else {
            return method_array_t{v.get<const class_ro_t *>(&ro_or_rw_ext)->baseMethods()};
        }
    }

    const property_array_t properties() const {
        auto v = get_ro_or_rwe();
        if (v.is<class_rw_ext_t *>()) {
            return v.get<class_rw_ext_t *>(&ro_or_rw_ext)->properties;
        } else {
            return property_array_t{v.get<const class_ro_t *>(&ro_or_rw_ext)->baseProperties};
        }
    }

    const protocol_array_t protocols() const {
        auto v = get_ro_or_rwe();
        if (v.is<class_rw_ext_t *>()) {
            return v.get<class_rw_ext_t *>(&ro_or_rw_ext)->protocols;
        } else {
            return protocol_array_t{v.get<const class_ro_t *>(&ro_or_rw_ext)->baseProtocols};
        }
    }
};
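methods(), properties(), and protocols() all branch the same way because ro_or_rw_ext is a one-bit tagged pointer union: it holds either the immutable class_ro_t * (a clean, unmodified class) or a class_rw_ext_t * that is allocated once the class gains runtime changes such as attached categories, and get_ro_or_rwe() reads the discriminator bit. A minimal sketch of the tagged-union idea, assuming both pointee types are at least 2-byte aligned so bit 0 is free; this is not the real objc::PointerUnion:

// Minimal tagged pointer union: bit 0 distinguishes the two pointee
// types, and the pointer itself lives in the remaining bits.
#include <cstdint>

template <typename A, typename B>
class TaggedUnion {
    uintptr_t value;
public:
    explicit TaggedUnion(A *a) : value((uintptr_t)a) {}       // tag 0
    explicit TaggedUnion(B *b) : value((uintptr_t)b | 1) {}   // tag 1
    bool isB() const { return value & 1; }
    A *getA() const { return isB() ? nullptr : (A *)value; }
    B *getB() const { return isB() ? (B *)(value & ~uintptr_t(1)) : nullptr; }
};

Keeping the rarely needed mutable state behind this indirection keeps class_rw_t itself small for the common case of classes that are never modified at runtime.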
struct class_ro_t {
    // code omitted ...

    union {
        const uint8_t * ivarLayout;
        Class nonMetaclass;
    };

    explicit_atomic<const char *> name;
    // With ptrauth, this is signed if it points to a small list, but
    // may be unsigned if it points to a big list.
    void *baseMethodList;
    protocol_list_t * baseProtocols;
    const ivar_list_t * ivars;
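Making name atomic matters because the runtime may swap in a replacement name after the class is published (for example, a lazily demangled Swift class name), and concurrent readers must never observe a torn pointer. A hedged sketch of that publish-once pattern; displayName, the leading-underscore check, and the strdup stand-in are all illustrative, not the runtime's actual demangling logic:

// Publish-once sketch: compute a nicer name, install it with a CAS so
// concurrent readers see either the old pointer or the new one.
#include <atomic>
#include <cstdlib>
#include <cstring>

std::atomic<const char *> name{"_TtC3Mod5Thing"};   // mangled Swift name

const char *displayName() {
    const char *cur = name.load(std::memory_order_acquire);
    if (cur[0] != '_') return cur;                  // already swapped (toy test)
    const char *nice = strdup("Mod.Thing");         // pretend demangling
    const char *expected = cur;
    if (!name.compare_exchange_strong(expected, nice,
                                      std::memory_order_acq_rel,
                                      std::memory_order_acquire)) {
        free((void *)nice);                         // another thread won the race
        return expected;                            // use the winner's pointer
    }
    return nice;
}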