glibc 2.35 (a relatively recent version; behaviour differs across versions, so mind which one you are targeting)

TcacheBin

tcache is short for thread local caching. TcacheBin is a caching mechanism added in glibc 2.26: it is faster to access than fastbin and is consulted with higher priority, but its integrity checks are correspondingly weak, which makes it an easy target for attacks.

TcacheBin data structures

#if USE_TCACHE
/* We want 64 entries.  This is an arbitrary limit, which tunables can reduce.  */
# define TCACHE_MAX_BINS  64
# define MAX_TCACHE_SIZE  tidx2usize (TCACHE_MAX_BINS-1)

/* Only used to pre-fill the tunables.  */
# define tidx2usize(idx)  (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)

/* When "x" is from chunksize().  */
# define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
/* When "x" is a user-provided size.  */
# define usize2tidx(x) csize2tidx (request2size (x))

/* With rounding and alignment, the bins are...
   idx 0   bytes 0..24 (64-bit) or 0..12 (32-bit)
   idx 1   bytes 25..40 or 13..20
   idx 2   bytes 41..56 or 21..28
   etc.  */

/* This is another arbitrary limit, which tunables can change.  Each
   tcache bin will hold at most this number of chunks.  */
# define TCACHE_FILL_COUNT 7

/* Maximum chunks in tcache bins for tunables.  This value must fit the range
   of tcache->counts[] entries, else they may overflow.  */
# define MAX_TCACHE_COUNT UINT16_MAX
#endif


/* We overlay this structure on the user-data portion of a chunk when
   the chunk is stored in the per-thread cache.  */
typedef struct tcache_entry
{
  struct tcache_entry *next;
  /* This field exists to detect double frees.  */
  uintptr_t key;
} tcache_entry;

/* There is one of these for each thread, which contains the
   per-thread cache (hence "tcache_perthread_struct").  Keeping
   overall size low is mildly important.  Note that COUNTS and ENTRIES
   are redundant (we could have just counted the linked list each
   time), this is for performance reasons.  */
typedef struct tcache_perthread_struct
{
  uint16_t counts[TCACHE_MAX_BINS];
  tcache_entry *entries[TCACHE_MAX_BINS];
} tcache_perthread_struct;

static __thread bool tcache_shutting_down = false;
static __thread tcache_perthread_struct *tcache = NULL;

/* Process-wide key to try and catch a double-free in the same thread.  */
static uintptr_t tcache_key;

static void
tcache_key_initialize (void)
{
  if (__getrandom (&tcache_key, sizeof(tcache_key), GRND_NONBLOCK)
      != sizeof (tcache_key))
    {
      tcache_key = random_bits ();
#if __WORDSIZE == 64
      tcache_key = (tcache_key << 32) | random_bits ();
#endif
    }
}

Each thread gets its own tcache: an array of TCACHE_MAX_BINS = 64 singly linked bins, where each bin caches at most TCACHE_FILL_COUNT = 7 free chunks of a single size. On 64-bit machines the bins step by 16 bytes and cover chunk sizes from 0x20 (32) up to 0x410 (1040) bytes; MAX_TCACHE_SIZE = tidx2usize(63) bounds the largest cacheable user size. On 32-bit machines they step by 8 bytes (see the per-index size comment in the source above). The cache is maintained by a tcache_perthread_struct, a chunk of size 0x290 (on 64-bit) placed at the very start of the heap: the counts array records how many chunks each bin currently holds, and the entries array points to the head of each bin's list.
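As a quick sanity check of those numbers, here is a minimal sketch (plain C, not glibc code) that recomputes the struct size on a 64-bit machine:

#include <stdio.h>
#include <stdint.h>

#define TCACHE_MAX_BINS 64

typedef struct tcache_entry { struct tcache_entry *next; uintptr_t key; } tcache_entry;

typedef struct tcache_perthread_struct
{
  uint16_t counts[TCACHE_MAX_BINS];
  tcache_entry *entries[TCACHE_MAX_BINS];
} tcache_perthread_struct;

int main (void)
{
  /* 64 uint16_t counts (0x80) + 64 pointers (0x200) = 0x280 of user data;
     adding the 0x10 chunk header gives the 0x290 chunk seen at the heap base.  */
  printf ("sizeof (tcache_perthread_struct) = %#zx\n",
          sizeof (tcache_perthread_struct)); /* prints 0x280 */
  return 0;
}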
The tcache_entry struct links free chunks into each bin's list. Two details deserve attention. First, next points to the user-data area (mem) of the next chunk in the bin (all chunks in one bin necessarily have the same size), whereas a fastbin chunk's fd points to the next chunk's header, i.e. its prev_size field. Second, key was added in glibc 2.29 to detect double frees: before glibc 2.34 it was a pointer to the thread's tcache_perthread_struct, stored where a free chunk's bk would sit; since 2.34 it is a random value produced by tcache_key_initialize. Note that the source comment above calls it a process-wide key shared by all threads, not one key per thread.
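A minimal sketch of that overlay (64-bit glibc >= 2.34 assumed; reading freed memory like this is undefined behaviour and is done purely for inspection):

#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  size_t *a = malloc (0x40);
  size_t *b = malloc (0x40);
  free (a);
  free (b);                   /* bin now: b -> a */
  /* The first two words of b's old user data are the tcache_entry fields.  */
  printf ("b->next (mangled) = %#zx\n", b[0]); /* PROTECT_PTR (&b->next, a) */
  printf ("b->key            = %#zx\n", b[1]); /* the process-wide tcache_key */
  return 0;
}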

tidx2usize(idx) computes the usable chunk size for a bin index
csize2tidx(x) maps a chunk size (as returned by chunksize()) to its bin index
usize2tidx(x) maps a user-requested size to its bin index
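To make the index math concrete, here is a small sketch that replays the macros with the usual 64-bit constants (SIZE_SZ = 8, MALLOC_ALIGNMENT = 16, MINSIZE = 32; these values are assumptions about the target, not defined in the excerpt above):

#include <stdio.h>
#include <stddef.h>

#define SIZE_SZ          8
#define MALLOC_ALIGNMENT 16
#define MINSIZE          32

#define tidx2usize(idx) (((size_t) (idx)) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
#define csize2tidx(x)   (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)

int main (void)
{
  /* malloc(0x18) rounds up to a 0x20 chunk -> bin 0;
     malloc(0x100) rounds up to a 0x110 chunk -> bin 15.  */
  printf ("csize2tidx(0x20)  = %zu\n", (size_t) csize2tidx (0x20));  /* 0 */
  printf ("csize2tidx(0x110) = %zu\n", (size_t) csize2tidx (0x110)); /* 15 */
  printf ("tidx2usize(63)    = %zu\n", tidx2usize (63)); /* 1032, the largest cacheable user size */
  return 0;
}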

TcacheBin shares many traits with FastBin: LIFO singly linked lists, the PREV_INUSE bit of the following chunk is not cleared on free, every chunk in a bin must have exactly the same size, and chunks cannot be coalesced while they sit in a tcache bin.
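A minimal sketch of the LIFO behaviour (any glibc with tcache; addresses will vary):

#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *a = malloc (0x40), *b = malloc (0x40), *c = malloc (0x40);
  free (a); free (b); free (c);   /* bin head: c -> b -> a */

  void *x = malloc (0x40);        /* x == c: last freed, first out */
  void *y = malloc (0x40);        /* y == b */
  void *z = malloc (0x40);        /* z == a */
  printf ("%p %p %p\n", x, y, z);
  return 0;
}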

TcacheBin initialization

static void
tcache_init(void)
{
  mstate ar_ptr;
  void *victim = 0;
  const size_t bytes = sizeof (tcache_perthread_struct);

  if (tcache_shutting_down)
    return;

  arena_get (ar_ptr, bytes);
  victim = _int_malloc (ar_ptr, bytes); /* allocate memory for the tcache_perthread_struct */
  if (!victim && ar_ptr != NULL)        /* if allocation failed, retry once */
    {
      ar_ptr = arena_get_retry (ar_ptr, bytes);
      victim = _int_malloc (ar_ptr, bytes);
    }


  if (ar_ptr != NULL)
    __libc_lock_unlock (ar_ptr->mutex); /* release the arena mutex */

  /* In a low memory situation, we may not be able to allocate memory
     - in which case, we just keep trying later.  However, we
     typically do this very early, so either there is sufficient
     memory, or there isn't enough memory to do non-trivial
     allocations anyway.  */
  if (victim)
    {
      tcache = (tcache_perthread_struct *) victim;
      memset (tcache, 0, sizeof (tcache_perthread_struct));
    }

}
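Since tcache_init runs on a thread's first allocation and the tcache_perthread_struct is itself carved out of the heap, on a fresh 64-bit glibc 2.35 heap its 0x290 chunk sits right at the heap base. A minimal sketch probing that layout (a sketch only, assuming nothing else has allocated from the heap first; the offsets are the usual 64-bit ones):

#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  size_t *p = malloc (0x18);   /* triggers tcache_init before serving us */
  /* Walk back past our 0x10 chunk header and the 0x290 tcache chunk.  */
  size_t *heap_base = (size_t *) ((char *) p - 0x290 - 0x10);
  printf ("chunk mem     : %p\n", (void *) p);
  printf ("heap base (?) : %p\n", (void *) heap_base);
  printf ("tcache chunk size field: %#zx\n", heap_base[1]); /* expect 0x291 */
  return 0;
}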

TcacheBin teardown

static void
tcache_thread_shutdown (void)
{
  int i;
  tcache_perthread_struct *tcache_tmp = tcache;

  tcache_shutting_down = true;

  if (!tcache)
    return;

  /* Disable the tcache and prevent it from being reinitialized.  */
  tcache = NULL; /* detach the per-thread tcache pointer */

  /* Free all of the entries and the tcache itself back to the arena
     heap for coalescing.  */
  for (i = 0; i < TCACHE_MAX_BINS; ++i)
    {
      while (tcache_tmp->entries[i])
        {
          tcache_entry *e = tcache_tmp->entries[i]; /* free every chunk in bin i */
          if (__glibc_unlikely (!aligned_OK (e)))   /* alignment check */
            malloc_printerr ("tcache_thread_shutdown(): "
                             "unaligned tcache chunk detected");
          tcache_tmp->entries[i] = REVEAL_PTR (e->next);
          __libc_free (e);
        }
    }

  __libc_free (tcache_tmp); /* finally free the tcache_perthread_struct itself */
}

TcacheBin put and get

/* Caller must ensure that we know tc_idx is valid and there's room
   for more chunks.  */
static __always_inline void
tcache_put (mchunkptr chunk, size_t tc_idx) /* put a free chunk into the tcache */
{
  tcache_entry *e = (tcache_entry *) chunk2mem (chunk);

  /* Mark this chunk as "in the tcache" so the test in _int_free will
     detect a double free.  */
  e->key = tcache_key; /* the double-free detection key */

  e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]); /* mangle the pointer to the current bin head and store it in next */
  tcache->entries[tc_idx] = e; /* make this chunk the new head of the bin */
  ++(tcache->counts[tc_idx]);  /* chunk counter + 1 */
}


/* Caller must ensure that we know tc_idx is valid and there's
   available chunks to remove.  */
static __always_inline void *
tcache_get (size_t tc_idx) /* take a chunk out of the tcache */
{
  tcache_entry *e = tcache->entries[tc_idx]; /* head chunk of the bin at the computed index */
  if (__glibc_unlikely (!aligned_OK (e)))    /* alignment check */
    malloc_printerr ("malloc(): unaligned tcache chunk detected");
  tcache->entries[tc_idx] = REVEAL_PTR (e->next); /* advance the head to the next chunk */
  --(tcache->counts[tc_idx]); /* chunk counter - 1 */
  e->key = 0;                 /* clear the key */
  return (void *) e;
}

When a chunk enters a tcache bin it is stamped with tcache_key, marking it as already cached. On a later free of the same pointer, free checks whether that key is present; if it is, the free is flagged as a double free, so an attacker has to find a way around the key check. On glibc 2.28 and earlier (before the key existed) double frees were trivial: you could simply free the same chunk twice in a row.
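A minimal sketch of that check in action (same source, different outcomes depending on the glibc it runs against):

#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *p = malloc (0x40);
  free (p);
  free (p); /* glibc <= 2.28: silently accepted;
               glibc >= 2.29: aborts with
               "free(): double free detected in tcache 2" */

  /* Only reached on old glibc: both allocations return the same chunk.  */
  void *a = malloc (0x40);
  void *b = malloc (0x40);
  printf ("a = %p, b = %p\n", a, b);
  return 0;
}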

One more thing worth noticing: when tcache_put writes the next member it passes it through a macro called PROTECT_PTR, and the list walks use REVEAL_PTR to undo it. Tracing them back to their definitions:

#define PROTECT_PTR(pos, ptr) \
  ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
#define REVEAL_PTR(ptr)  PROTECT_PTR (&ptr, ptr)

So tcache applies a bit of arithmetic to fd (the next pointer) before storing it in the chunk, as a lightweight form of encryption. This mitigation, known as Safe-Linking, was introduced in glibc 2.32 (though it feels somewhat like security through obscurity). In short: the address at which the pointer is stored is shifted right by 12 bits (the width of the page offset) and XORed with the pointer itself, yielding the mangled fd.
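A minimal sketch of the math (not glibc code; the two addresses are hypothetical):

#include <stdio.h>
#include <stdint.h>

static uintptr_t protect_ptr (uintptr_t pos, uintptr_t ptr)
{
  return (pos >> 12) ^ ptr; /* pos is &e->next, ptr is the old bin head */
}

int main (void)
{
  uintptr_t pos = 0x55555555a2c0; /* hypothetical address of e->next */
  uintptr_t ptr = 0x55555555a6b0; /* hypothetical old head of the bin */

  uintptr_t mangled = protect_ptr (pos, ptr);
  /* Demangling needs the storage address again.  */
  uintptr_t recovered = (pos >> 12) ^ mangled;

  printf ("mangled   = %#lx\n", (unsigned long) mangled);
  printf ("recovered = %#lx\n", (unsigned long) recovered); /* == ptr */
  return 0;
}

One practical consequence: when a bin holds a single chunk, next is a mangled NULL, so the stored value is just (&e->next) >> 12, and leaking it reveals the upper bits of a heap address.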

Checks performed on a chunk by the tcache path of free

#if USE_TCACHE
  {
    size_t tc_idx = csize2tidx (size);
    if (tcache != NULL && tc_idx < mp_.tcache_bins)
      {
        /* Check to see if it's already in the tcache.  */
        tcache_entry *e = (tcache_entry *) chunk2mem (p);

        /* This test succeeds on double free.  However, we don't 100%
           trust it (it also matches random payload data at a 1 in
           2^<size_t> chance), so verify it's not an unlikely
           coincidence before aborting.  */
        if (__glibc_unlikely (e->key == tcache_key))
          {
            tcache_entry *tmp;
            size_t cnt = 0;
            LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
            for (tmp = tcache->entries[tc_idx];
                 tmp;
                 tmp = REVEAL_PTR (tmp->next), ++cnt)
              {
                if (cnt >= mp_.tcache_count)
                  malloc_printerr ("free(): too many chunks detected in tcache");
                if (__glibc_unlikely (!aligned_OK (tmp))) /* alignment check */
                  malloc_printerr ("free(): unaligned chunk detected in tcache 2");
                if (tmp == e) /* double-free check */
                  malloc_printerr ("free(): double free detected in tcache 2");
                /* If we get here, it was a coincidence.  We've wasted a
                   few cycles, but don't abort.  */
              }
          }

        if (tcache->counts[tc_idx] < mp_.tcache_count) /* the bin is not full yet */
          {
            tcache_put (p, tc_idx);
            return;
          }
      }
  }
#endif

It is easy to see that although tcache still performs fewer checks than the other bins, recent versions, unlike older ones, do detect double frees and unaligned chunks.
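To illustrate why the key check alone is not a hard barrier, here is a hedged sketch of the classic bypass on glibc 2.35: given a use-after-free write, wiping the key field lets a second free pass the e->key == tcache_key test (illustration only):

#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  size_t *p = malloc (0x40);
  free (p);   /* p[1] (the key field) now holds tcache_key */
  p[1] = 0;   /* UAF write: clobber the key */
  free (p);   /* no longer detected: the chunk enters the bin twice */

  void *a = malloc (0x40);
  void *b = malloc (0x40);
  printf ("a = %p, b = %p\n", a, b); /* same address twice */
  return 0;
}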

The stashing mechanism

See the separate article.

