Exploring the NSObject Lifecycle

NSObject

Declaration

@interface NSObject <NSObject> {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wobjc-interface-ivars"
    Class isa OBJC_ISA_AVAILABILITY;
#pragma clang diagnostic pop
}

NSObject contains a single instance variable, Class isa. Let's look at how Class is defined.

typedef struct objc_class *Class;

struct objc_class : objc_object {
    // Class ISA;
    Class superclass;
    cache_t cache;              // formerly cache pointer and vtable
    class_data_bits_t bits;     // class_rw_t * plus custom rr/alloc flags

    ...
};

struct cache_t {
    struct bucket_t *_buckets;
    mask_t _mask;
    mask_t _occupied;

public:
    ...

    /* cache growth */
    mask_t capacity();
    ...
    void expand();

    ...
};

/* method cache entry */
struct bucket_t {
private:
    cache_key_t _key;
    IMP _imp;

    ...
};

struct objc_object {
private:
    isa_t isa;

    ...

    /* initializes the isa pointer */
    void initInstanceIsa(Class cls, bool hasCxxDtor);

    // changeIsa() should be used to change the isa of existing objects.
    // If this is a new object, use initIsa() for performance.

    /* KVO uses this method to repoint an object's isa at the dynamically generated subclass */
    Class changeIsa(Class newCls);

    ...
};

union isa_t
{
    isa_t() { }
    isa_t(uintptr_t value) : bits(value) { }

    Class cls;
    uintptr_t bits;

    ...
};
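The elided part of isa_t is a bit-field that packs the class pointer together with several per-object flags. For reference, the arm64 layout in objc4 looks roughly like this (field widths and order differ between architectures and runtime versions, so treat it as a sketch):

struct {
    uintptr_t nonpointer        : 1;   // 1 = isa stores packed bits rather than a raw Class pointer
    uintptr_t has_assoc         : 1;   // object has (or had) associated objects
    uintptr_t has_cxx_dtor      : 1;   // object has a C++ destructor to run during dealloc
    uintptr_t shiftcls          : 33;  // the actual class pointer, shifted
    uintptr_t magic             : 6;   // lets the debugger recognize initialized objects
    uintptr_t weakly_referenced : 1;   // object is (or was) pointed to by a __weak variable
    uintptr_t deallocating      : 1;   // object is currently running dealloc
    uintptr_t has_sidetable_rc  : 1;   // part of the retain count has spilled into the side table
    uintptr_t extra_rc          : 19;  // retain count minus one, stored inline
};

The weakly_referenced, deallocating, has_sidetable_rc and extra_rc fields are exactly the ones the retain/release/dealloc code below branches on.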

Creation

Initializing the Class (created automatically by the runtime)
static Class
alloc_class_for_subclass(Class supercls, size_t extraBytes) {
    ...

    return (Class)calloc(1, sizeof(objc_class) + extraBytes);
}
Calling alloc
+ (id)alloc {
    return callAlloc(self, false/*checkNil*/, true/*allocWithZone*/);
}

static ALWAYS_INLINE id
callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
{
    ...

    id obj = (id)calloc(1, size);
    obj->initInstanceIsa(cls, hasCxxDtor);
    ...
}

+ (id)allocWithZone:(struct _NSZone *)zone {
    ...
}
/* Its final implementation is the same as alloc; check the source yourself if you're curious. */
Calling init
- (id)init {
    return _objc_rootInit(self);
}

+ (id)new {
    return [callAlloc(self, false/*checkNil*/) init];
}

From the code above you can see that new is simply [[Class alloc] init].
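As a quick sanity check (using a hypothetical class Foo), the two spellings below go through the same allocation and initialization path:

Foo *a = [Foo new];               // new calls callAlloc(...) and then init
Foo *b = [[Foo alloc] init];      // identical result, written out explicitly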

retain

- (id)retain {
    return ((id)self)->rootRetain();
}

ALWAYS_INLINE id
objc_object::rootRetain(bool tryRetain, bool handleOverflow)
{
    if (isTaggedPointer()) return (id)this;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    isa_t oldisa;
    isa_t newisa;

    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;

        /* Raw-pointer (non-nonpointer) isa takes this path: the retain count lives entirely in the side table */

        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            if (!tryRetain && sideTableLocked) sidetable_unlock();
            if (tryRetain) return sidetable_tryRetain() ? (id)this : nil;
            else return sidetable_retain();
        }
        // don't check newisa.fast_rr; we already called any RR overrides

        /* The object is already deallocating: tryRetain fails */

        if (slowpath(tryRetain && newisa.deallocating)) {
            ClearExclusive(&isa.bits);
            if (!tryRetain && sideTableLocked) sidetable_unlock();
            return nil;
        }
        uintptr_t carry;
        newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry); // extra_rc++

        if (slowpath(carry)) {
            // newisa.extra_rc++ overflowed
            if (!handleOverflow) {
                ClearExclusive(&isa.bits);
                return rootRetain_overflow(tryRetain);
            }
            // Leave half of the retain counts inline and
            // prepare to copy the other half to the side table.
            if (!tryRetain && !sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.extra_rc = RC_HALF;
            newisa.has_sidetable_rc = true;
        }
    } while (slowpath(!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)));

    if (slowpath(transcribeToSideTable)) {
        // Copy the other half of the retain counts to the side table.

        /* add RC_HALF to the side-table retain count */

        sidetable_addExtraRC_nolock(RC_HALF) = {
            assert(isa.nonpointer);
            SideTable& table = SideTables()[this];

            size_t& refcntStorage = table.refcnts[this];
            size_t oldRefcnt = refcntStorage;
            // isa-side bits should not be set here
            assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
            assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

            if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;

            uintptr_t carry;
            size_t newRefcnt =
                addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
            if (carry) {
                refcntStorage =
                    SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK);
                return true;
            }
            else {
                refcntStorage = newRefcnt;
                return false;
            }
        };
    }

    if (slowpath(!tryRetain && sideTableLocked)) sidetable_unlock();
    return (id)this;
}

bool
objc_object::sidetable_tryRetain()
{
    SideTable& table = SideTables()[this];

    bool result = true;

    /*
    Look up the side-table retain count for this object:
    1. If there is no entry yet, create one with a count of 1.
    2. If the object is being deallocated, the retain fails.
    3. Otherwise (count > 0 and not pinned), increment the count by 1.
    */

    RefcountMap::iterator it = table.refcnts.find(this);
    if (it == table.refcnts.end()) {
        table.refcnts[this] = SIDE_TABLE_RC_ONE;
    } else if (it->second & SIDE_TABLE_DEALLOCATING) {
        result = false;
    } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
        it->second += SIDE_TABLE_RC_ONE;
    }

    return result;
}
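Putting the two storage locations together: for a nonpointer isa, the effective retain count is 1 + extra_rc plus whatever has spilled into the side table. This is roughly how the objc4 sources compute it (a sketch; details vary by version):

inline uintptr_t
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;

    sidetable_lock();
    isa_t bits = LoadExclusive(&isa.bits);
    ClearExclusive(&isa.bits);
    if (bits.nonpointer) {
        uintptr_t rc = 1 + bits.extra_rc;            // inline part
        if (bits.has_sidetable_rc) {
            rc += sidetable_getExtraRC_nolock();     // part spilled into the side table
        }
        sidetable_unlock();
        return rc;
    }

    sidetable_unlock();
    return sidetable_retainCount();                  // raw-pointer isa: side table only
}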

release

- (oneway void)release {
    ((id)self)->rootRelease();
}

ALWAYS_INLINE bool
objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
{
    ...

    /* borrow part of the retain count back from the side table */

    size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF) = {
        assert(isa.nonpointer);
        SideTable& table = SideTables()[this];

        RefcountMap::iterator it = table.refcnts.find(this);
        if (it == table.refcnts.end() || it->second == 0) {
            // Side table retain count is zero. Can't borrow.
            return 0;
        }
        size_t oldRefcnt = it->second;

        // isa-side bits should not be set here
        assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
        assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

        size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
        assert(oldRefcnt > newRefcnt); // shouldn't underflow
        it->second = newRefcnt;
        return delta_rc;
    };

    ...

    // Really deallocate.
    /* If the object is already marked as deallocating, this is an over-release */

    if (slowpath(newisa.deallocating)) {
        ClearExclusive(&isa.bits);
        if (sideTableLocked) sidetable_unlock();
        return overrelease_error();
        // does not actually return
    }
    newisa.deallocating = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;

    if (slowpath(sideTableLocked)) sidetable_unlock();

    /* The retain count has reached zero: send the SEL_dealloc message so the object is destroyed */

    __sync_synchronize();
    if (performDealloc) {
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
    }
    return true;
}

autorelease

- (id)autorelease {
    return ((id)self)->rootAutorelease();
}

/* The object is added to the autorelease pool, which is organized as a chain of pages */
inline id
objc_object::rootAutorelease()
{
    return AutoreleasePoolPage::autorelease((id)this);
}
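AutoreleasePoolPage::autorelease eventually lands in autoreleaseFast, which appends the object pointer to the current ("hot") page and spills over to a new page when that page is full. Sketched from objc4 (details vary by version):

static inline id *autoreleaseFast(id obj)
{
    AutoreleasePoolPage *page = hotPage();
    if (page && !page->full()) {
        return page->add(obj);                  // push obj onto the hot page's stack
    } else if (page) {
        return autoreleaseFullPage(obj, page);  // hot page is full: chain a new child page
    } else {
        return autoreleaseNoPage(obj);          // no pool page yet: create the first one
    }
}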

autoreleasePool

static inline void *push() {
    id *dest;
    if (DebugPoolAllocation) {
        // Each autorelease pool starts on a new pool page.
        dest = autoreleaseNewPage(POOL_BOUNDARY);
    } else {
        dest = autoreleaseFast(POOL_BOUNDARY);
    }
    assert(dest == EMPTY_POOL_PLACEHOLDER || *dest == POOL_BOUNDARY);
    return dest;
}

static inline void pop(void *token) {
    ...
    /*
    Walk back from the hot page, popping entries until the stop marker:
    every popped entry that is an object (not a pool boundary) gets released,
    and the hot page moves to its parent whenever the current page empties.
    */

    page->releaseUntil(stop) = {
        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage

        while (this->next != stop) {
            // Restart from hotPage() every time, in case -release
            // autoreleased more objects
            AutoreleasePoolPage *page = hotPage();

            // fixme I think this `while` can be `if`, but I can't prove it
            while (page->empty()) {
                page = page->parent;
                setHotPage(page);
            }

            page->unprotect();
            id obj = *--page->next;
            memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
            page->protect();

            if (obj != POOL_BOUNDARY) {
                objc_release(obj);
            }
        }

        setHotPage(this);

#if DEBUG
        // we expect any children to be completely empty
        for (AutoreleasePoolPage *page = child; page; page = page->child) {
            assert(page->empty());
        }
#endif
    };

    // memory: delete empty AutoreleasePoolPages
    if (DebugPoolAllocation && page->empty()) {
        // special case: delete everything during page-per-pool debugging
        AutoreleasePoolPage *parent = page->parent;
        page->kill();
        setHotPage(parent);
    } else if (DebugMissingPools && page->empty() && !page->parent) {
        // special case: delete everything for pop(top)
        // when debugging missing autorelease pools
        page->kill();
        setHotPage(nil);
    }
    else if (page->child) {
        // hysteresis: keep one empty child if page is more than half full
        if (page->lessThanHalfFull()) {
            page->child->kill();
        }
        else if (page->child->child) {
            page->child->child->kill();
        }
    }
}
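For context, an @autoreleasepool block is lowered by the compiler to a matched pair of these calls; roughly:

void *poolToken = objc_autoreleasePoolPush();   // AutoreleasePoolPage::push(): records a POOL_BOUNDARY
// ... every object that receives -autorelease here is appended to the hot page ...
objc_autoreleasePoolPop(poolToken);             // AutoreleasePoolPage::pop(): releases everything above the boundary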

Both retain and release above rely on the SideTable struct, whose instances are global. A SideTable manages retain-count increments and decrements as well as weak pointers.

SideTable

struct SideTable {
    spinlock_t slock;           // spin lock: SideTable's methods are thread-safe
    RefcountMap refcnts;        // retain-count storage
    weak_table_t weak_table;    // weak-pointer storage

    ...
};

/**
 * The global weak references table. Stores object ids as keys,
 * and weak_entry_t structs as their values.
 */
struct weak_table_t {
    weak_entry_t *weak_entries;
    size_t num_entries;
    uintptr_t mask;
    uintptr_t max_hash_displacement;
};

struct weak_entry_t {
    DisguisedPtr<objc_object> referent;
    union {
        struct {
            weak_referrer_t *referrers;
            uintptr_t out_of_line_ness : 2;
            uintptr_t num_refs : PTR_MINUS_2;
            uintptr_t mask;
            uintptr_t max_hash_displacement;
        };
        struct {
            // out_of_line_ness field is low bits of inline_referrers[1]
            weak_referrer_t inline_referrers[WEAK_INLINE_COUNT];
        };
    };

    ...
};
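"Global" here does not mean one SideTable per object: SideTables() is a striped map that hashes an object's address onto a small fixed set of tables, so unrelated objects may share a table and its lock. A rough sketch of how the retain/release code above uses it, assuming the StripedMap<SideTable> used by objc4:

// SideTables() returns a StripedMap<SideTable>&; indexing hashes the object address.
SideTable& table = SideTables()[obj];        // roughly: tables[hash(address) % stripe count]
table.lock();
size_t refcnt = table.refcnts[obj];          // this object's retain-count slot
weak_table_t &weak_table = table.weak_table; // and its weak-reference bookkeeping
table.unlock();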

Reference counting was covered above; now let's look at the weak references table.

weak references table

/**
 * Initialize a fresh weak pointer to some object location.
 * It would be used for code like:
 *
 * (The nil case)
 * __weak id weakPtr;
 * (The non-nil case)
 * NSObject *o = ...;
 * __weak id weakPtr = o;
 *
 * This function IS NOT thread-safe with respect to concurrent
 * modifications to the weak variable. (Concurrent weak clear is safe.)
 *
 * @param location Address of __weak ptr.
 * @param newObj Object ptr.
 */

id objc_initWeak(id *location, id newObj) {
    if (!newObj) {
        *location = nil;
        return nil;
    }

    return storeWeak<DontHaveOld, DoHaveNew, DoCrashIfDeallocating>
        (location, (objc_object*)newObj);
}

// If HaveOld is true, the variable has an existing value
// that needs to be cleaned up. This value might be nil.
// If HaveNew is true, there is a new value that needs to be
// assigned into the variable. This value might be nil.
// If CrashIfDeallocating is true, the process is halted if newObj is
// deallocating or newObj's class does not support weak references.
// If CrashIfDeallocating is false, nil is stored instead.
static id storeWeak(id *location, objc_object *newObj) {
    assert(haveOld || haveNew);
    if (!haveNew) assert(newObj == nil);

    Class previouslyInitializedClass = nil;
    id oldObj;
    SideTable *oldTable;
    SideTable *newTable;

    // Acquire locks for old and new values.
    // Order by lock address to prevent lock ordering problems.
    // Retry if the old value changes underneath us.
 retry:
    if (haveOld) {
        oldObj = *location;
        oldTable = &SideTables()[oldObj];
    } else {
        oldTable = nil;
    }
    if (haveNew) {
        newTable = &SideTables()[newObj];
    } else {
        newTable = nil;
    }

    SideTable::lockTwo<haveOld, haveNew>(oldTable, newTable);

    if (haveOld && *location != oldObj) {
        SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
        goto retry;
    }

    // Prevent a deadlock between the weak reference machinery
    // and the +initialize machinery by ensuring that no
    // weakly-referenced object has an un-+initialized isa.
    if (haveNew && newObj) {
        Class cls = newObj->getIsa();
        if (cls != previouslyInitializedClass &&
            !((objc_class *)cls)->isInitialized())
        {
            SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
            _class_initialize(_class_getNonMetaClass(cls, (id)newObj));

            // If this class is finished with +initialize then we're good.
            // If this class is still running +initialize on this thread
            // (i.e. +initialize called storeWeak on an instance of itself)
            // then we may proceed but it will appear initializing and
            // not yet initialized to the check above.
            // Instead set previouslyInitializedClass to recognize it on retry.
            previouslyInitializedClass = cls;

            goto retry;
        }
    }

    // Clean up old value, if any.
    if (haveOld) {
        weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
    }

    // Assign new value, if any.
    if (haveNew) {
        newObj = (objc_object *)
            weak_register_no_lock(&newTable->weak_table, (id)newObj, location,
                                  crashIfDeallocating);
        // weak_register_no_lock returns nil if weak store should be rejected

        // Set is-weakly-referenced bit in refcount table.
        if (newObj && !newObj->isTaggedPointer()) {
            newObj->setWeaklyReferenced_nolock();
        }

        // Do not set *location anywhere else. That would introduce a race.
        *location = (id)newObj;
    }
    else {
        // No new value. The storage is not changed.
    }

    SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);

    return (id)newObj;
}
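To see where these entry points are used, here is roughly what the compiler emits for ordinary __weak usage under ARC, given some strong references obj and other (a sketch; the exact lowering can vary by compiler version):

{
    __weak id w = obj;    // objc_initWeak(&w, obj)       -> storeWeak<DontHaveOld, DoHaveNew, ...>
    w = other;            // objc_storeWeak(&w, other)    -> storeWeak<DoHaveOld, DoHaveNew, ...>
    id strong = w;        // objc_loadWeakRetained(&w)     (reads and retains the value safely)
}                         // objc_destroyWeak(&w)         -> storeWeak<DoHaveOld, DontHaveNew, ...>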
weak_register_no_lock
id weak_register_no_lock(weak_table_t *weak_table, id referent_id,
                         id *referrer_id, bool crashIfDeallocating) {
    objc_object *referent = (objc_object *)referent_id;
    objc_object **referrer = (objc_object **)referrer_id;

    if (!referent || referent->isTaggedPointer()) return referent_id;

    // Make sure the object being registered is not already deallocating.

    bool deallocating;
    if (!referent->ISA()->hasCustomRR()) {
        deallocating = referent->rootIsDeallocating();
    }
    else {
        BOOL (*allowsWeakReference)(objc_object *, SEL) =
            (BOOL(*)(objc_object *, SEL))
            object_getMethodImplementation((id)referent,
                                           SEL_allowsWeakReference);
        if ((IMP)allowsWeakReference == _objc_msgForward) {
            return nil;
        }
        deallocating =
            ! (*allowsWeakReference)(referent, SEL_allowsWeakReference);
    }

    if (deallocating) {
        if (crashIfDeallocating) {
            _objc_fatal("Cannot form weak reference to instance (%p) of "
                        "class %s. It is possible that this object was "
                        "over-released, or is in the process of deallocation.",
                        (void*)referent, object_getClassName((id)referent));
        } else {
            return nil;
        }
    }

    /*
    Register the weak reference:
    1. If an entry already exists for this object, append the referrer to its referrers list.
    2. Otherwise, create a new weak_entry_t and insert it into the weak table.
    */
    weak_entry_t *entry;
    if ((entry = weak_entry_for_referent(weak_table, referent))) {
        append_referrer(entry, referrer);
    }
    else {
        weak_entry_t new_entry(referent, referrer);

        /* weak_table growth policy: once the table is 3/4 full, allocate a new one
           (double the old size; the initial size is 64) and rehash the old entries into it */

        weak_grow_maybe(weak_table);
        weak_entry_insert(weak_table, &new_entry);
    }

    // Do not set *referrer. objc_storeWeak() requires that the
    // value not change.

    return referent_id;
}
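The growth policy mentioned above corresponds to weak_grow_maybe, which in objc4 looks roughly like this (a sketch; TABLE_SIZE is mask + 1):

static void weak_grow_maybe(weak_table_t *weak_table)
{
    size_t old_size = TABLE_SIZE(weak_table);

    // Grow if at least 3/4 full.
    if (weak_table->num_entries >= old_size * 3 / 4) {
        weak_resize(weak_table, old_size ? old_size * 2 : 64);   // rehashes the existing entries
    }
}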
static void append_referrer(weak_entry_t *entry, objc_object **new_referrer)
{
    if (! entry->out_of_line()) {
        // Try to insert inline.
        for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) {
            if (entry->inline_referrers[i] == nil) {
                entry->inline_referrers[i] = new_referrer;
                return;
            }
        }

        // Couldn't insert inline. Allocate out of line.
        weak_referrer_t *new_referrers = (weak_referrer_t *)
            calloc(WEAK_INLINE_COUNT, sizeof(weak_referrer_t));
        // This constructed table is invalid, but grow_refs_and_insert
        // will fix it and rehash it.
        for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) {
            new_referrers[i] = entry->inline_referrers[i];
        }
        entry->referrers = new_referrers;
        entry->num_refs = WEAK_INLINE_COUNT;
        entry->out_of_line_ness = REFERRERS_OUT_OF_LINE;
        entry->mask = WEAK_INLINE_COUNT-1;
        entry->max_hash_displacement = 0;
    }

    assert(entry->out_of_line());

    if (entry->num_refs >= TABLE_SIZE(entry) * 3/4) {
        return grow_refs_and_insert(entry, new_referrer);
    }
    size_t begin = w_hash_pointer(new_referrer) & (entry->mask);
    size_t index = begin;
    size_t hash_displacement = 0;
    while (entry->referrers[index] != nil) {
        hash_displacement++;
        index = (index+1) & entry->mask;
        if (index == begin) bad_weak_table(entry);
    }
    if (hash_displacement > entry->max_hash_displacement) {
        entry->max_hash_displacement = hash_displacement;
    }
    weak_referrer_t &ref = entry->referrers[index];
    ref = new_referrer;
    entry->num_refs++;
}


static void weak_entry_insert(weak_table_t *weak_table, weak_entry_t *new_entry)
{
    weak_entry_t *weak_entries = weak_table->weak_entries;
    assert(weak_entries != nil);

    size_t begin = hash_pointer(new_entry->referent) & (weak_table->mask);
    size_t index = begin;
    size_t hash_displacement = 0;
    while (weak_entries[index].referent != nil) {
        index = (index+1) & weak_table->mask;
        if (index == begin) bad_weak_table(weak_entries);
        hash_displacement++;
    }

    weak_entries[index] = *new_entry;
    weak_table->num_entries++;

    if (hash_displacement > weak_table->max_hash_displacement) {
        weak_table->max_hash_displacement = hash_displacement;
    }
}

static inline uintptr_t hash_pointer(objc_object *key) {
    return ptr_hash((uintptr_t)key);
}

static inline uint32_t ptr_hash(uint64_t key)
{
    key ^= key >> 4;
    key *= 0x8a970be7488fda55;
    key ^= __builtin_bswap64(key);
    return (uint32_t)key;
}

From the code above you can see that weak_table_t is an open-addressing hash table: the object's hash_pointer value (masked by weak_table->mask, with linear probing on collisions) selects the slot, and the weak_entry_t stored there holds all of that object's weak referrers.
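The matching lookup, weak_entry_for_referent, probes the same slots in the same order; sketched from objc4 (details vary by version):

static weak_entry_t *
weak_entry_for_referent(weak_table_t *weak_table, objc_object *referent)
{
    assert(referent);

    weak_entry_t *weak_entries = weak_table->weak_entries;
    if (!weak_entries) return nil;

    size_t begin = hash_pointer(referent) & weak_table->mask;
    size_t index = begin;
    size_t hash_displacement = 0;
    while (weak_table->weak_entries[index].referent != referent) {
        index = (index+1) & weak_table->mask;
        if (index == begin) bad_weak_table(weak_table->weak_entries);
        hash_displacement++;
        if (hash_displacement > weak_table->max_hash_displacement) {
            return nil;    // not present: we probed farther than any insert ever did
        }
    }
    return &weak_table->weak_entries[index];
}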

weak_unregister_no_lock
/* Remove an already-registered weak reference from the weak table */

void weak_unregister_no_lock(weak_table_t *weak_table, id referent_id,
                             id *referrer_id) {
    objc_object *referent = (objc_object *)referent_id;
    objc_object **referrer = (objc_object **)referrer_id;

    weak_entry_t *entry;

    if (!referent) return;

    if ((entry = weak_entry_for_referent(weak_table, referent))) {
        remove_referrer(entry, referrer);
        bool empty = true;
        if (entry->out_of_line() && entry->num_refs != 0) {
            empty = false;
        }
        else {
            for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) {
                if (entry->inline_referrers[i]) {
                    empty = false;
                    break;
                }
            }
        }

        if (empty) {
            weak_entry_remove(weak_table, entry);
        }
    }

    // Do not set *referrer = nil. objc_storeWeak() requires that the
    // value not change.
}

dealloc

- (void)dealloc {
    self->rootDealloc();
}

inline void objc_object::rootDealloc() {
    if (isTaggedPointer()) return;  // fixme necessary?

    if (fastpath(isa.nonpointer &&
                 !isa.weakly_referenced &&
                 !isa.has_assoc &&
                 !isa.has_cxx_dtor &&
                 !isa.has_sidetable_rc))
    {
        assert(!sidetable_present());
        free(this);
    }
    else {
        object_dispose((id)this) = {
            if (!obj) return nil;

            objc_destructInstance(obj) = {
                if (obj) {
                    // Read all of the flags at once for performance.
                    bool cxx = obj->hasCxxDtor();
                    bool assoc = obj->hasAssociatedObjects();

                    // This order is important.
                    /*
                    If there are C++ ivars, run their destructors;
                    if there are associated objects, remove them.
                    */
                    if (cxx) object_cxxDestruct(obj);
                    if (assoc) _object_remove_assocations(obj);
                    obj->clearDeallocating() = {
                        if (slowpath(!isa.nonpointer)) {
                            // Slow path for raw pointer isa.
                            sidetable_clearDeallocating();
                        }
                        else if (slowpath(isa.weakly_referenced || isa.has_sidetable_rc)) {
                            // Slow path for non-pointer isa with weak refs and/or side table data.
                            clearDeallocating_slow();
                        }

                        assert(!sidetable_present());
                    };
                }

                return obj;
            };
            free(obj);

            return nil;
        };
    }
}

Both slow paths eventually call weak_clear_no_lock on the object's SideTable, which sets every weak reference recorded in the object's weak_entry_t to nil and then removes the entry itself.

void weak_clear_no_lock(weak_table_t *weak_table, id referent_id) {
    objc_object *referent = (objc_object *)referent_id;

    weak_entry_t *entry = weak_entry_for_referent(weak_table, referent);
    if (entry == nil) {
        /// XXX shouldn't happen, but does with mismatched CF/objc
        //printf("XXX no entry for clear deallocating %p\n", referent);
        return;
    }

    // zero out references
    weak_referrer_t *referrers;
    size_t count;

    if (entry->out_of_line()) {
        referrers = entry->referrers;
        count = TABLE_SIZE(entry);
    }
    else {
        referrers = entry->inline_referrers;
        count = WEAK_INLINE_COUNT;
    }

    for (size_t i = 0; i < count; ++i) {
        objc_object **referrer = referrers[i];
        if (referrer) {
            if (*referrer == referent) {
                *referrer = nil;
            }
            else if (*referrer) {
                _objc_inform("__weak variable at %p holds %p instead of %p. "
                             "This is probably incorrect use of "
                             "objc_storeWeak() and objc_loadWeak(). "
                             "Break on objc_weak_error to debug.\n",
                             referrer, (void*)*referrer, (void*)referent);
                objc_weak_error();
            }
        }
    }

    weak_entry_remove(weak_table, entry);
}
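This is the machinery behind the familiar behavior that a __weak reference goes nil on its own once the object it points to is deallocated. For example, under ARC:

NSObject *obj = [[NSObject alloc] init];
__weak id weakRef = obj;    // registered via objc_initWeak -> storeWeak -> weak_register_no_lock

obj = nil;                  // last strong reference gone: dealloc -> weak_clear_no_lock runs
NSLog(@"%@", weakRef);      // prints "(null)": the weak slot has already been set to nil for us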