structcache_t { //省略代码 public: // The following four fields are public for objcdt's use only. // objcdt reaches into fields while the process is suspended // hence doesn't care for locks and pesky little details like this // and can safely use these. unsignedcapacity()const; struct bucket_t *buckets()const;//重点 Class cls()const;
// Use the cache as-is if until we exceed our expected fill ratio. mask_t newOccupied = occupied() + 1; ///取当前占用的空间大小 unsigned oldCapacity = capacity(), capacity = oldCapacity; if (slowpath(isConstantEmptyCache())) { // Cache is read-only. Replace it. if (!capacity) capacity = INIT_CACHE_SIZE; reallocate(oldCapacity, capacity, /* freeOld */false); } elseif (fastpath(newOccupied + CACHE_END_MARKER <= cache_fill_ratio(capacity))) { // Cache is less than 3/4 or 7/8 full. Use it as-is. } #if CACHE_ALLOW_FULL_UTILIZATION elseif (capacity <= FULL_UTILIZATION_CACHE_SIZE && newOccupied + CACHE_END_MARKER <= capacity) { // Allow 100% cache utilization for small buckets. Use it as-is. } #endif else { capacity = capacity ? capacity * 2 : INIT_CACHE_SIZE; if (capacity > MAX_CACHE_SIZE) { capacity = MAX_CACHE_SIZE; } ///分配空间 reallocate(oldCapacity, capacity, true); } ///省略代码 }
// (line-number gutter "1 2 3 4 5 6 7 8 9 10 11 12 13" left over from the
//  article this code was copied from — not part of the source)
#if CACHE_END_MARKER || (__arm64__ && !__LP64__) // When we have a cache end marker it fills a bucket slot, so having a // initial cache size of 2 buckets would not be efficient when one of the // slots is always filled with the end marker. So start with a cache size // 4 buckets. INIT_CACHE_SIZE_LOG2 = 2, #else // Allow an initial bucket size of 2 buckets, since a large number of // classes, especially metaclasses, have very few imps, and we support // the ability to fill 100% of the cache before resizing. INIT_CACHE_SIZE_LOG2 = 1, #endif INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2),
voidcache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask) { // objc_msgSend uses mask and buckets with no locks. // It is safe for objc_msgSend to see new buckets but old mask. // (It will get a cache miss but not overrun the buckets' bounds). // It is unsafe for objc_msgSend to see old buckets and new mask. // Therefore we write new buckets, wait a lot, then write new mask. // objc_msgSend reads mask first, then buckets.
#ifdef __arm__ // ensure other threads see buckets contents before buckets pointer mega_barrier();
// ensure other threads see new buckets before new mask mega_barrier();
_maybeMask.store(newMask, memory_order_relaxed); _occupied = 0; #elif __x86_64__ || i386 // ensure other threads see buckets contents before buckets pointer _bucketsAndMaybeMask.store((uintptr_t)newBuckets, memory_order_release);
// ensure other threads see new buckets before new mask _maybeMask.store(newMask, memory_order_release); _occupied = 0; #else #error Don't know how to do setBucketsAndMask on this architecture. #endif }
voidcache_t::insert(SEL sel, IMP imp, id receiver) { runtimeLock.assertLocked();//加锁 // Never cache before +initialize is done if (slowpath(!cls()->isInitialized())) {//initialize调用之后才会缓存 return; } if (isConstantOptimizedCache()) {//内联函数 return false 所以if里不会执行 _objc_fatal("cache_t::insert() called with a preoptimized cache for %s", cls()->nameForLogging()); } #if DEBUG_TASK_THREADS return _collecting_in_critical(); #else #if CONFIG_USE_CACHE_LOCK mutex_locker_tlock(cacheUpdateLock); #endif
ASSERT(sel != 0 && cls()->isInitialized());
// Use the cache as-is if until we exceed our expected fill ratio. mask_t newOccupied = occupied() + 1; unsigned oldCapacity = capacity(), capacity = oldCapacity; if (slowpath(isConstantEmptyCache())) { // Cache is read-only. Replace it. if (!capacity) capacity = INIT_CACHE_SIZE; reallocate(oldCapacity, capacity, /* freeOld */false); } elseif (fastpath(newOccupied + CACHE_END_MARKER <= cache_fill_ratio(capacity))) { // Cache is less than 3/4 or 7/8 full. Use it as-is. } #if CACHE_ALLOW_FULL_UTILIZATION elseif (capacity <= FULL_UTILIZATION_CACHE_SIZE && newOccupied + CACHE_END_MARKER <= capacity) { // Allow 100% cache utilization for small buckets. Use it as-is. } #endif else { capacity = capacity ? capacity * 2 : INIT_CACHE_SIZE; if (capacity > MAX_CACHE_SIZE) { capacity = MAX_CACHE_SIZE; } reallocate(oldCapacity, capacity, true); }
bucket_t *b = buckets(); mask_t m = capacity - 1; mask_tbegin = cache_hash(sel, m); mask_t i = begin;
// Scan for the first unused slot and insert there. // There is guaranteed to be an empty slot. do { if (fastpath(b[i].sel() == 0)) { incrementOccupied(); b[i].set<Atomic, Encoded>(b, sel, imp, cls()); return; } if (b[i].sel() == sel) { // The entry was added to the cache by some other thread // before we grabbed the cacheUpdateLock. return; } } while (fastpath((i = cache_next(i, m)) != begin));