public:
// The following four fields are public for objcdt's use only.
// objcdt reaches into fields while the process is suspended
// hence doesn't care for locks and pesky little details like this
// and can safely use these.
unsigned capacity() const;    // capacity: total number of bucket slots
struct bucket_t *buckets() const;
Class cls() const;
#if CONFIG_USE_PREOPT_CACHES
const preopt_cache_t *preopt_cache() const;
#endif
mask_t occupied() const;      // number of occupied (cached) entries
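// For reference, capacity() is derived from the cache mask. A simplified
// sketch per the objc4 source (mask() is elided here because its storage
// varies with the CACHE_MASK_STORAGE configuration):
unsigned cache_t::capacity() const
{
    return mask() ? mask() + 1 : 0;   // always a power of two, or 0 when empty
}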
struct bucket_t {
private:
// IMP-first is better for arm64e ptrauth and no worse for arm64.
// SEL-first is better for armv7* and i386 and x86_64.
#if __arm64__
explicit_atomic<uintptr_t> _imp;
explicit_atomic<SEL> _sel;
#else
explicit_atomic<SEL> _sel;
explicit_atomic<uintptr_t> _imp;
#endif
...
};
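// How a stored imp becomes callable again: a minimal sketch of the
// CACHE_IMP_ENCODING_ISA_XOR case (arm64 without ptrauth; arm64e re-signs
// via ptrauth instead). lw_decode_imp is a hypothetical helper name; in
// objc4 this logic lives in bucket_t::imp().
static inline IMP lw_decode_imp(uintptr_t storedImp, Class cls)
{
    if (!storedImp) return nil;
    // XOR with the class recovers the raw IMP; a stale read against the
    // wrong class decodes to garbage rather than a callable address.
    return (IMP)(storedImp ^ (uintptr_t)cls);
}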
#import <objc/runtime.h>
typedef uint32_t mask_t; // x86_64 & arm64 asm are less efficient with 16-bits
struct lw_bucket_t {
SEL _sel;
IMP _imp;
};
struct lw_cache_t {
struct lw_bucket_t *_buckets;
mask_t _maybeMask;
uint16_t _flags;
uint16_t _occupied;
};
struct lw_class_data_bits_t {
uintptr_t bits;
};
// Simplified mirror of objc_class (isa, superclass, cache, bits)
struct lw_objc_class {
Class isa;
Class superclass;
struct lw_cache_t cache;
struct lw_class_data_bits_t bits;
};
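// A minimal sketch of using the lw_* mirrors to peek at a live cache.
// Assumptions: the layout above (separate buckets pointer and mask) matches
// outlined mask storage such as macOS x86_64; on arm64 the buckets pointer
// and mask share one word, so this cast would misread those fields.
#import <Foundation/Foundation.h>

int main(void) {
    Class cls = [NSObject class];
    struct lw_objc_class *pClass = (struct lw_objc_class *)(__bridge void *)cls;
    [[NSObject alloc] init];   // send some messages so the class's cache fills
    printf("occupied = %u, mask = %u\n",
           (unsigned)pClass->cache._occupied,
           (unsigned)pClass->cache._maybeMask);
    return 0;
}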
void cache_t::insert(SEL sel, IMP imp, id receiver)
{
...
// Use the cache as-is until we exceed our expected fill ratio.
// Bump the count for the entry we are about to insert.
mask_t newOccupied = occupied() + 1;
unsigned oldCapacity = capacity(), capacity = oldCapacity;
if (slowpath(isConstantEmptyCache())) {
// The cache is the shared, read-only empty cache: replace it with a real
// one whose initial size (a power of two, built by bit shifting) depends
// on the architecture.
if (!capacity) capacity = INIT_CACHE_SIZE;
reallocate(oldCapacity, capacity, /* freeOld */false);
}
else if (fastpath(newOccupied + CACHE_END_MARKER <= cache_fill_ratio(capacity))) {
// Cache is less than 3/4 or 7/8 full. Use it as-is.
// The growth threshold has not been reached yet.
}
#if CACHE_ALLOW_FULL_UTILIZATION
else if (capacity <= FULL_UTILIZATION_CACHE_SIZE && newOccupied + CACHE_END_MARKER <= capacity) {
// Allow 100% cache utilization for small buckets. Use it as-is.
// Allow 100% cache utilization for smaller cache sizes. This has the same
// advantages and disadvantages as the fill ratio. A very large percentage
// of caches end up with very few entries and the worst case of collision
// chains in small tables is relatively small.
// NOTE: objc_msgSend properly handles a cache lookup with a full cache.
}
#endif
else {
// Grow: double the capacity (capped at MAX_CACHE_SIZE) and start over
// with fresh buckets.
capacity = capacity ? capacity * 2 : INIT_CACHE_SIZE;
if (capacity > MAX_CACHE_SIZE) {
capacity = MAX_CACHE_SIZE;
}
reallocate(oldCapacity, capacity, true);
}
bucket_t *b = buckets();
mask_t m = capacity - 1;
mask_t begin = cache_hash(sel, m);
mask_t i = begin;
// Scan for the first unused slot and insert there.
// There is guaranteed to be an empty slot.
// Probe from the hashed start index until a usable slot turns up.
do {
if (fastpath(b[i].sel() == 0)) {
incrementOccupied();
// Empty slot: store the new entry here.
b[i].set<Atomic, Encoded>(b, sel, imp, cls());
return;
}
if (b[i].sel() == sel) {
// The entry was added to the cache by some other thread
// before we grabbed the cacheUpdateLock.
return;
}
} while (fastpath((i = cache_next(i, m)) != begin));
// Neither an empty slot nor an existing entry was found: the cache is in
// a bad state, so crash.
bad_cache(receiver, (SEL)sel);
}
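// For reference, the hash and probe helpers used above, per the objc4
// source (lightly simplified). The hash is simply the selector pointer
// masked to the table size; collisions fall back to linear probing.
static inline mask_t cache_hash(SEL sel, mask_t mask)
{
    uintptr_t value = (uintptr_t)sel;
#if CONFIG_USE_PREOPT_CACHES
    value ^= value >> 7;   // fold in high bits for the preopt-cache scheme
#endif
    return (mask_t)(value & mask);
}

#if CACHE_END_MARKER
static inline mask_t cache_next(mask_t i, mask_t mask) {
    return (i + 1) & mask;     // scan upward and wrap
}
#elif __arm64__
static inline mask_t cache_next(mask_t i, mask_t mask) {
    return i ? i - 1 : mask;   // scan downward; no end marker needed
}
#endif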
/* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
enum {
#if CACHE_END_MARKER || (__arm64__ && !__LP64__)
// When we have a cache end marker it fills a bucket slot, so an initial
// cache size of 2 buckets would not be efficient when one of the slots
// is always filled with the end marker. So start with a cache size of
// 4 buckets.
INIT_CACHE_SIZE_LOG2 = 2,
#else
// Allow an initial cache size of 2 buckets, since a large number of
// classes, especially metaclasses, have very few imps, and we support
// the ability to fill 100% of the cache before resizing.
INIT_CACHE_SIZE_LOG2 = 1,
#endif
INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2),
MAX_CACHE_SIZE_LOG2 = 16,
MAX_CACHE_SIZE = (1 << MAX_CACHE_SIZE_LOG2),
FULL_UTILIZATION_CACHE_SIZE_LOG2 = 3,
FULL_UTILIZATION_CACHE_SIZE = (1 << FULL_UTILIZATION_CACHE_SIZE_LOG2),
};
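// Worked example: with CACHE_END_MARKER == 1, a fresh cache starts at
// INIT_CACHE_SIZE = 4 with a threshold of cache_fill_ratio(4) = 3. Inserts
// 1 and 2 pass the check (newOccupied + 1 <= 3); insert 3 fails it
// (3 + 1 > 3), so capacity doubles to 8 and, because reallocate() drops
// the old buckets instead of rehashing them, occupied restarts at 1.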
#if __arm__  ||  __x86_64__  ||  __i386__
// objc_msgSend has few registers available.
// Cache scan increments and wraps at special end-marking bucket.
#define CACHE_END_MARKER 1
// Historical fill ratio of 75% (since the new objc runtime was introduced).
static inline mask_t cache_fill_ratio(mask_t capacity) {
return capacity * 3 / 4;
}
#elif __arm64__ && !__LP64__
#define CACHE_END_MARKER 0
// Historical fill ratio of 75% (since the new objc runtime was introduced).
static inline mask_t cache_fill_ratio(mask_t capacity) {
return capacity * 3 / 4;
}
#elif __arm64__ && __LP64__
// objc_msgSend has lots of registers available.
// Cache scan decrements. No end marker needed.
#define CACHE_END_MARKER 0
// Allow 87.5% fill ratio in the fast path for all cache sizes.
// Increasing the cache fill ratio reduces the fragmentation and wasted space
// in imp-caches at the cost of potentially increasing the average lookup of
// a selector in imp-caches by increasing collision chains. Another potential
// change is that cache table resizes / resets happen at different moments.
static inline mask_t cache_fill_ratio(mask_t capacity) {
return capacity * 7 / 8;
}
// Small caches (up to FULL_UTILIZATION_CACHE_SIZE) may fill completely.
#define CACHE_ALLOW_FULL_UTILIZATION 1
#endif
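// Worked comparison at capacity 8: on x86_64 the threshold is 8 * 3/4 = 6
// and the end marker costs one slot, so the 6th insert (6 + 1 > 6) forces
// a resize. On arm64 the threshold is 8 * 7/8 = 7 with no marker, and the
// full-utilization branch (capacity <= FULL_UTILIZATION_CACHE_SIZE == 8)
// lets all 8 slots fill; only the 9th insert triggers growth.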
template<Atomicity atomicity, IMPEncoding impEncoding>
void bucket_t::set(bucket_t *base, SEL newSel, IMP newImp, Class cls)
{
...
// objc_msgSend uses sel and imp with no locks.
// It is safe for objc_msgSend to see new imp but NULL sel
// (It will get a cache miss but not dispatch to the wrong place.)
// It is unsafe for objc_msgSend to see old imp and new sel.
// Therefore we write new imp, wait a lot, then write new sel.
// Encode the IMP (ptrauth re-sign on arm64e, XOR with the class elsewhere).
uintptr_t newIMP = (impEncoding == Encoded
? encodeImp(base, newImp, newSel, cls)
: (uintptr_t)newImp);
...
// Store the encoded imp first, then the sel, per the ordering above.
}
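// The matching encode step, minimally sketched (the ISA-XOR case; on
// arm64e objc4 instead re-signs via ptrauth with a per-bucket modifier).
// lw_encode_imp is a hypothetical helper name; in objc4 this logic lives
// in encodeImp().
static inline uintptr_t lw_encode_imp(IMP newImp, Class cls)
{
    if (!newImp) return 0;
    return (uintptr_t)newImp ^ (uintptr_t)cls;   // inverse of lw_decode_imp above
}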