This article introduces slab memory error types (1); hopefully it offers a useful reference to developers debugging these problems.
/********************************************************************/
Test code:
int slab_test(void)
{
	void *object;

	pr_err("slab_test: Cache name is %s\n", my_cachep->name);
	pr_err("slab_test: Cache object size is %d\n", kmem_cache_size(my_cachep));

	object = kmem_cache_alloc(my_cachep, GFP_KERNEL);
	if (object) {
		pr_err("slab_test: get an object %p\n", object);
		kmem_cache_free(my_cachep, object);
	}

	pr_err("slab_test: check the object after free %p\n", object);
	return 0;
}
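my_cachep is created elsewhere in the module init (slabtest_driver_init, visible in the backtraces below); a minimal sketch of what that presumably looks like. The flags are an assumption: the redzone checks discussed later only fire with slab debugging enabled, e.g. CONFIG_DEBUG_SLAB or an explicit SLAB_RED_ZONE.

static struct kmem_cache *my_cachep;

static int __init slabtest_driver_init(void)
{
	/* 32-byte objects, matching the size printed in the logs */
	my_cachep = kmem_cache_create("my_cache", 32, 0,
				      SLAB_RED_ZONE | SLAB_POISON, NULL);
	if (!my_cachep)
		return -ENOMEM;
	return slab_test();
}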
/* After the free, the content object points to is still there, unchanged. Can it still be used? */
[ 4.355768:1] slab_test: Cache name is my_cache
[ 4.360284:1] slab_test: Cache object size is 32
[ 4.364918:1] slab_test: get an object ee161478
[ 4.369435:1] slab_test: check the object after free ee161478
The log confirms the pointer is unchanged after the free; with no poisoning enabled, the old contents also survive until the slab allocator reuses the object, so continuing to use it is a latent use-after-free bug.
/* Can the object be freed more than once? What happens if it is? */
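The code that produced the next log is not shown; presumably slab_test was modified to free the same object twice, along these lines (a minimal sketch, not the author's exact code):

object = kmem_cache_alloc(my_cachep, GFP_KERNEL);
if (object) {
	kmem_cache_free(my_cachep, object);	/* first, legitimate free */
	kmem_cache_free(my_cachep, object);	/* double free under test */
}
pr_err("slab_test: check the object free again %p\n", object);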
The log from the double-free run is shown below:
[ 4.395386:1] slab_test: Cache name is my_cache
[ 4.399971:1] slab_test: Cache object size is 32
[ 4.404801:1] slab_test: get an object ee1c0478
[ 4.409383:1] slab_test: check the object after free ee1c0478
When kmem_cache_free is called the second time, an error message is printed and the stack is dumped; later, when the per-CPU caches are reaped, the kernel crashes.
1. The kmem_cache_free path
kmem_cache_free first checks the red zone words around the object; on a double free it reports the following:
[ 4.415253:1] slab error in verify_redzone_free(): cache `my_cache': double free detected
[ 4.423467:1] Backtrace:
[ 4.426234:1] [<c00121fc>] (dump_backtrace+0x0/0x110) from [<c057fa80>] (dump_stack+0x18/0x1c)
[ 4.434879:1] r6:9d74e35b r5:ee1c0470 r4:ee15aa00 r3:c07b41ac
[ 4.440954:1] [<c057fa68>] (dump_stack+0x0/0x1c) from [<c00ac7c0>] (__slab_error+0x28/0x30)
[ 4.449374:1] [<c00ac798>] (__slab_error+0x0/0x30) from [<c00ace34>] (cache_free_debugcheck+0x194/0x27c)
[ 4.458920:1] [<c00acca0>] (cache_free_debugcheck+0x0/0x27c) from [<c00ad384>] (kmem_cache_free+0x40/0x12c)
[ 4.468734:1] [<c00ad344>] (kmem_cache_free+0x0/0x12c) from [<c0254d54>] (slab_test+0x7c/0xa8)
[ 4.477422:1] [<c0254cd8>] (slab_test+0x0/0xa8) from [<c0776eb0>] (slabtest_driver_init+0x40/0x58)
[ 4.486411:1] r5:c0776e70 r4:00000000
[ 4.490306:1] [<c0776e70>] (slabtest_driver_init+0x0/0x58) from [<c000856c>] (do_one_initcall+0xb0/0x180)
[ 4.499901:1] r4:00000006
[ 4.502713:1] [<c00084bc>] (do_one_initcall+0x0/0x180) from [<c07633d4>] (kernel_init+0xec/0x1c8)
[ 4.511659:1] [<c07632e8>] (kernel_init+0x0/0x1c8) from [<c002bbcc>] (do_exit+0x0/0x750)
[ 4.519780:1] r7:00000013 r6:c002bbcc r5:c07632e8 r4:00000000
[ 4.525816:1] ee1c0470: redzone 1:0x9f911029d74e35b, redzone 2:0x9f911029d74e35b.
[ 4.533449:1] slab_test: check the object free again ee1c0478
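The redzone values in the log give the mechanism away: 0x9f911029d74e35b is RED_INACTIVE from include/linux/poison.h, the pattern written into both redzone words when an object is freed. On the second free, cache_free_debugcheck() finds redzones that already say "inactive" and reports a double free. The check, abridged from mm/slab.c of this era:

static void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
	unsigned long long redzone1, redzone2;

	redzone1 = *dbg_redzone1(cache, obj);
	redzone2 = *dbg_redzone2(cache, obj);

	/* Both redzones still RED_ACTIVE: a valid first free. */
	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
		return;

	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
		slab_error(cache, "double free detected");
	else
		slab_error(cache, "memory outside object was overwritten");

	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
	       obj, redzone1, redzone2);
}

After reporting, the free still goes through, so the already-free object is linked into the per-CPU array cache a second time; the real damage surfaces later, when cache_reap drains that array.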
2. When the cache_reap work runs: the kernel crashes
cache_reap drains the per-CPU array caches back onto the slab free lists via free_block() and slab_put_obj(), whose DEBUG double-free check (shown at the end of this section) hits BUG() at mm/slab.c:2896:
[ 15.022475:1] slab: double free detected in cache 'my_cache', objp ee1c0470
[ 15.029452:1] ------------[ cut here ]------------
[ 15.034233:1] kernel BUG at mm/slab.c:2896!
[ 15.996133:1] Backtrace:
[ 15.998774:1] [<c00ad5b0>] (free_block+0x0/0x1e8) from [<c00ad844>] (drain_array+0xac/0xd4)
[ 16.007107:1] [<c00ad798>] (drain_array+0x0/0xd4) from [<c00ad98c>] (cache_reap+0x60/0x138)
[ 16.015427:1] r8:c105ef30 r7:c105ef30 r6:00000000 r5:ee1571e0 r4:ee15aa00
[ 16.022162:1] r3:00000000
[ 16.024983:1] [<c00ad92c>] (cache_reap+0x0/0x138) from [<c003cde4>] (process_one_work+0x26c/0x418)
[ 16.033910:1] r7:00000000 r6:c1062000 r5:c105e620 r4:ee04bce0
[ 16.039786:1] [<c003cb78>] (process_one_work+0x0/0x418) from [<c003d314>] (worker_thread+0x1bc/0x2bc)
[ 16.048982:1] [<c003d158>] (worker_thread+0x0/0x2bc) from [<c0042de8>] (kthread+0x90/0x9c)
[ 16.057230:1] [<c0042d58>] (kthread+0x0/0x9c) from [<c002bbcc>] (do_exit+0x0/0x750)
/*
 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}
/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
static void cache_reap(struct work_struct *w)
{
	struct kmem_cache *searchp;
	struct kmem_list3 *l3;
	int node = numa_mem_id();
	struct delayed_work *work = to_delayed_work(w);

	if (!mutex_trylock(&cache_chain_mutex))
		/* Give up. Setup the next iteration. */
		goto out;

	list_for_each_entry(searchp, &cache_chain, next) {
		check_irq_on();

		/*
		 * We only take the l3 lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
		l3 = searchp->nodelists[node];

		reap_alien(searchp, l3);

		drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);

		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
		if (time_after(l3->next_reap, jiffies))
			goto next;

		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;

		drain_array(searchp, l3, l3->shared, 0, node);

		if (l3->free_touched)
			l3->free_touched = 0;
		else {
			int freed;

			freed = drain_freelist(searchp, l3, (l3->free_limit +
				5 * searchp->num - 1) / (5 * searchp->num));
			STATS_ADD_REAPED(searchp, freed);
		}
next:
		cond_resched();
	}
	check_irq_on();
	mutex_unlock(&cache_chain_mutex);
	next_reap_node();
out:
	/* Set up the next iteration */
	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
}
/*
 * Drain an array if it contains any elements taking the l3 lock only if
 * necessary. Note that the l3 listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
			 struct array_cache *ac, int force, int node)
{
	int tofree;

	if (!ac || !ac->avail)
		return;

	if (ac->touched && !force) {
		ac->touched = 0;
	} else {
		spin_lock_irq(&l3->list_lock);
		if (ac->avail) {
			tofree = force ? ac->avail : (ac->limit + 4) / 5;
			if (tofree > ac->avail)
				tofree = (ac->avail + 1) / 2;
			free_block(cachep, ac->entry, tofree, node);
			ac->avail -= tofree;
			memmove(ac->entry, &(ac->entry[tofree]),
				sizeof(void *) * ac->avail);
		}
		spin_unlock_irq(&l3->list_lock);
	}
}
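As a quick sanity check on the drain rate: for a per-CPU cache with, say, limit = 120 (a hypothetical but plausible value), a non-forced pass frees (120 + 4) / 5 = 24 objects, roughly a fifth of the cache per reap interval, so idle caches are bled down gradually rather than flushed at once.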
static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
			 void *objp, int nodeid)
{
	unsigned int objnr = obj_to_index(cachep, slabp, objp);

#if DEBUG
	/* Verify that the slab belongs to the intended node */
	WARN_ON(slabp->nodeid != nodeid);

	/* Why is this condition enough to detect a double free? */
	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
		printk(KERN_ERR "slab: double free detected in cache "
				"'%s', objp %p\n", cachep->name, objp);
		BUG();
	}
#endif
	slab_bufctl(slabp)[objnr] = slabp->free;
	slabp->free = objnr;
	slabp->inuse--;
}
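The answer to the question in the comment is on the allocation side. Under DEBUG, slab_get_obj() stamps an allocated object's bufctl entry with the sentinel BUFCTL_FREE; a free object's entry instead holds a free-list link, either a valid object index (<= SLAB_LIMIT) or the terminator BUFCTL_END (~0). The "+ 1" makes BUFCTL_END wrap to 0, so a single unsigned comparison catches both forms of free-list link: if the entry already looks like a link, the object is already on the free list, and this free must be the second one. The matching allocation path, abridged from the same mm/slab.c:

static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
			  int nodeid)
{
	void *objp = index_to_obj(cachep, slabp, slabp->free);
	kmem_bufctl_t next;

	slabp->inuse++;
	next = slab_bufctl(slabp)[slabp->free];
#if DEBUG
	/* Mark the entry so slab_put_obj() can tell in-use from free. */
	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
	WARN_ON(slabp->nodeid != nodeid);
#endif
	slabp->free = next;

	return objp;
}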
/********************************************************************/
This concludes the article on slab memory error types (1); hopefully it is of some help.