diff --git a/src/Makefile b/src/Makefile
index 38dbf6d..0e584de 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -22,7 +22,7 @@ objs-$(CONFIG_SCHEDSTATS) += stats.o
 objs-$(CONFIG_SCHED_DEBUG) += debug.o
 
 obj-m += scheduler.o
-scheduler-objs := $(objs-y) $(sidecar_objs) main.o sched_rebuild.o
+scheduler-objs := $(objs-y) $(sidecar_objs) main.o tainted.o mempool.o sched_rebuild.o
 
 search_cb := {if (/CALLBACK/) {print "__cb_"$$2} else {print $$2}}
 search_und := grep "UND __mod_" | awk '{print substr($$8,7)}' | sort | uniq
diff --git a/src/head_jump.h b/src/head_jump.h
index 30467d0..817a615 100644
--- a/src/head_jump.h
+++ b/src/head_jump.h
@@ -6,8 +6,7 @@
 #ifndef __HEAD_JUMP_H
 #define __HEAD_JUMP_H
 
-#include
-#include
+#include "helper.h"
 
 #define EXPORT_SIDECAR(fn, file, ...) EXPORT_PLUGSCHED(fn, __VA_ARGS__)
 #define EXPORT_CALLBACK EXPORT_PLUGSCHED
@@ -53,8 +52,8 @@ static unsigned long mod_func_size[NR_INTERFACE_FN];
  * 2) For functions set:
  *    1. Add the function to export_jump.h file
  *    2. Call jump_init_all() to init all functions data
- *    3. Use JUMP_OPERATION(install) macro to replace the functions set
- *    4. Use JUMP_OPERATION(remove) macro to restore the functions set
+ *    3. Use jump_install() to replace the functions set
+ *    4. Use jump_remove() to restore the functions set
  */
 
 #ifdef CONFIG_X86_64
@@ -93,14 +92,6 @@ extern void __orig___fentry__(void);
 #define JUMP_REMOVE_FUNC(func) \
 	memcpy((unsigned char *)__orig_##func, store_orig_##func, HEAD_LEN)
 
-
-/* Must be used in stop machine context */
-#define JUMP_OPERATION(ops) do {				\
-		void *unused = disable_write_protect(NULL);	\
-		jump_##ops();					\
-		enable_write_protect();				\
-	} while(0)
-
 #else /* For ARM64 */
 
 #define DEFINE_JUMP_FUNC(func) \
 	static u32 store_orig_##func; \
@@ -122,10 +113,6 @@ extern void __orig___fentry__(void);
 #define JUMP_REMOVE_FUNC(func) \
 	aarch64_insn_patch_text_nosync(__orig_##func, store_orig_##func)
 
-#define JUMP_OPERATION(ops) do {	\
-		jump_##ops();		\
-	} while(0)
-
 #endif /* CONFIG_X86_64 */
 
 #define EXPORT_PLUGSCHED(fn, ...) DEFINE_JUMP_FUNC(fn);
@@ -135,14 +122,18 @@ extern void __orig___fentry__(void);
 
 #define EXPORT_PLUGSCHED(fn, ...) JUMP_INSTALL_FUNC(fn);
+/* Must be used in stop machine context */
 static inline void jump_install(void)
 {
+	disable_write_protect_global();
 #include "export_jump.h"
+	enable_write_protect_global();
 }
 #undef EXPORT_PLUGSCHED
 
 #define EXPORT_PLUGSCHED(fn, ...) JUMP_REMOVE_FUNC(fn);
+/* Must be used in stop machine context */
 static inline void jump_remove(void)
 {
+	disable_write_protect_global();
 #include "export_jump.h"
+	enable_write_protect_global();
 }
 #undef EXPORT_PLUGSCHED
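+
+/*
+ * Illustrative sketch only (hypothetical caller, not part of this patch):
+ * as noted above, jump_install()/jump_remove() must run from a
+ * stop_machine() callback, e.g.:
+ *
+ *	static int __install_cb(void *arg)
+ *	{
+ *		jump_install();
+ *		return 0;
+ *	}
+ *
+ *	err = stop_machine(__install_cb, NULL, cpu_online_mask);
+ *
+ * stop_machine() is the real kernel API; __install_cb is a made-up name.
+ */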
diff --git a/src/helper.h b/src/helper.h
index 6e5b631..dc0a405 100644
--- a/src/helper.h
+++ b/src/helper.h
@@ -1,6 +1,8 @@
 // Copyright 2019-2022 Alibaba Group Holding Limited.
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+#ifndef __HELPER_H
+#define __HELPER_H
 
 /*
  * helper function to communicate with vmlinux
  */
@@ -14,6 +16,16 @@ static inline void do_write_cr0(unsigned long val)
 	asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
 }
 
+/* declared early so the _global wrappers can call them */
+static inline void *disable_write_protect(void *addr);
+static inline void enable_write_protect(void);
+
+static inline void *disable_write_protect_global(void)
+{
+	return disable_write_protect(NULL);
+}
+
+static inline void enable_write_protect_global(void)
+{
+	enable_write_protect();
+}
+
 static inline void *disable_write_protect(void *addr)
 {
 	BUG_ON(orig_cr0);
@@ -37,6 +49,9 @@ static inline void enable_write_protect(void)
 #include
 #include
 
+static inline void *disable_write_protect_global(void) { return NULL; }
+static inline void enable_write_protect_global(void) { }
+
 static void *disable_write_protect(void *addr)
 {
 	unsigned long uintaddr = (uintptr_t) addr;
@@ -147,3 +162,4 @@ static void addr_sort(unsigned long *addr, unsigned long *size, int n) {
 		}
 	}
 }
+#endif /* __HELPER_H */
\ No newline at end of file
diff --git a/src/main.c b/src/main.c
index 8e08642..69d360c 100644
--- a/src/main.c
+++ b/src/main.c
@@ -17,6 +17,7 @@
 #include "mempool.h"
 #include "head_jump.h"
 #include "stack_check.h"
+#include "tainted.h"
 
 #define CHECK_STACK_LAYOUT() \
 	BUILD_BUG_ON_MSG(MODULE_FRAME_POINTER != VMLINUX_FRAME_POINTER, \
@@ -70,23 +71,6 @@ extern void switch_sched_class(bool mod);
 static int scheduler_enable = 0;
 struct kobject *plugsched_dir, *plugsched_subdir, *vmlinux_moddir;
 
-struct tainted_function {
-	char *name;
-	struct kobject *kobj;
-};
-
-#undef TAINTED_FUNCTION
-#define TAINTED_FUNCTION(func,sympos)		\
-	{					\
-		.name = #func "," #sympos,	\
-		.kobj = NULL,			\
-	},
-
-struct tainted_function tainted_functions[] = {
-	#include "tainted_functions.h"
-	{}
-};
-
 static inline void parallel_state_check_init(void)
 {
 	atomic_set(&cpu_finished, num_online_cpus());
@@ -188,7 +172,7 @@ static int __sync_sched_install(void *arg)
 
 	if (is_first_process()) {
 		switch_sched_class(true);
-		JUMP_OPERATION(install);
+		jump_install();
 		disable_stack_protector();
 		sched_alloc_extrapad();
 		reset_balance_callback();
@@ -236,7 +220,7 @@ static int __sync_sched_restore(void *arg)
 
 	if (is_first_process()) {
 		switch_sched_class(false);
-		JUMP_OPERATION(remove);
+		jump_remove();
 		reset_balance_callback();
 		sched_free_extrapad();
 	}
@@ -536,30 +520,6 @@ static void unregister_plugsched_enable(void)
 	kobject_put(plugsched_dir);
 }
 
-static int register_tainted_functions(void)
-{
-	struct tainted_function *tf;
-
-	for (tf = tainted_functions; tf->name; tf++) {
-		tf->kobj = kobject_create_and_add(tf->name, vmlinux_moddir);
-		if (!tf->kobj)
-			return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static void unregister_tainted_functions(void)
-{
-	struct tainted_function *tf;
-
-	for (tf = tainted_functions; tf->name; tf++) {
-		if (!tf->kobj)
-			return;
-		kobject_put(tf->kobj);
-	}
-}
-
 static inline void unregister_plugsched_sysfs(void)
 {
 	unregister_tainted_functions();
@@ -573,7 +533,7 @@ static int register_plugsched_sysfs(void)
 		return -ENOMEM;
 	}
 
-	if (register_tainted_functions()) {
+	if (register_tainted_functions(vmlinux_moddir)) {
 		printk("scheduler: Error: Register taint functions failed!\n");
 		unregister_plugsched_sysfs();
 		return -ENOMEM;
diff --git a/src/mempool.c b/src/mempool.c
new file mode 100644
index 0000000..2e8259f
--- /dev/null
+++ b/src/mempool.c
@@ -0,0 +1,269 @@
+/**
+ * Copyright 2019-2023 Alibaba Group Holding Limited.
+ * SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ */
+
+#ifdef SCHEDMOD_MEMPOOL
+
+#include <linux/slab.h>		/* kzalloc_node, kfree */
+#include <linux/vmalloc.h>	/* vmalloc_node, vfree */
+#include <linux/percpu.h>	/* __alloc_percpu, free_percpu */
+#include "mempool.h"
+
+static inline void *simple_mempool_alloc(struct simple_mempool *smpool)
+{
+	void *ret;
+
+	ret = smpool->alloc_addr;
+	smpool->alloc_addr += smpool->obj_size;
+
+	return ret;
+}
+
+static inline struct simple_mempool *simple_mempool_create(int obj_num, int obj_size)
+{
+	struct simple_mempool *smpool;
+
+	smpool = kzalloc_node(sizeof(*smpool), GFP_ATOMIC, 0);
+	if (!smpool)
+		return NULL;
+
+	smpool->vstart = vmalloc_node(obj_num * obj_size, 0);
+	if (!smpool->vstart) {
+		kfree(smpool);
+		return NULL;
+	}
+
+	smpool->alloc_addr = smpool->vstart;
+	smpool->vend = smpool->vstart + obj_num * obj_size;
+	smpool->obj_size = obj_size;
+	smpool->obj_num = obj_num;
+
+	return smpool;
+}
+
+static inline void simple_mempool_destory(struct simple_mempool *smpool)
+{
+	vfree(smpool->vstart);
+	kfree(smpool);
+}
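+
+/*
+ * Illustrative sketch only (hypothetical pool, not used by this file):
+ * the raw lifecycle is create -> alloc -> destory, e.g. a pool of 16
+ * 64-byte objects:
+ *
+ *	struct simple_mempool *smp = simple_mempool_create(16, 64);
+ *	void *obj;
+ *
+ *	if (!smp)
+ *		return -ENOMEM;
+ *	obj = simple_mempool_alloc(smp);	// pure bump allocator: no exhaustion check
+ *	simple_mempool_destory(smp);
+ *
+ * In practice the DEFINE_RESERVE() wrappers from mempool.h (see the
+ * examples below) are the intended interface.
+ */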
+
+static struct simple_percpu_mempool *simple_percpu_mempool_create(int obj_num,
+		int obj_size)
+{
+	unsigned int areas, objs_per_area, cnt = 0;
+	struct simple_percpu_mempool *psmpool;
+	void *ptr;
+
+	psmpool = kzalloc_node(sizeof(*psmpool), GFP_ATOMIC, 0);
+	if (!psmpool)
+		return NULL;
+
+	/* Calculate how many percpu areas are required. */
+	objs_per_area = PCPU_MIN_UNIT_SIZE / obj_size;
+	areas = (obj_num + objs_per_area - 1) / objs_per_area;
+
+	psmpool->percpu_ptr =
+		kzalloc_node(sizeof(unsigned long) * areas, GFP_ATOMIC, 0);
+	if (!psmpool->percpu_ptr)
+		goto error;
+
+	for (cnt = 0; cnt < areas; cnt++) {
+		ptr = __alloc_percpu(PCPU_MIN_UNIT_SIZE, obj_size);
+		if (!ptr)
+			goto error;
+
+		psmpool->percpu_ptr[cnt] = (unsigned long)ptr;
+	}
+
+	psmpool->obj_size = obj_size;
+	psmpool->objs_per_area = objs_per_area;
+	psmpool->areas = areas;
+
+	return psmpool;
+
+error:
+	while (cnt > 0)
+		free_percpu((void *)psmpool->percpu_ptr[--cnt]);
+
+	kfree(psmpool->percpu_ptr);
+	kfree(psmpool);
+
+	return NULL;
+}
+
+static void *simple_percpu_mempool_alloc(struct simple_percpu_mempool *psmpool)
+{
+	unsigned long area_size, ret;
+
+	area_size = psmpool->obj_size * psmpool->objs_per_area;
+
+	if ((psmpool->allocated_size + psmpool->obj_size) > area_size) {
+		psmpool->area_id++;
+		psmpool->allocated_size = 0;
+	}
+
+	ret = psmpool->percpu_ptr[psmpool->area_id] + psmpool->allocated_size;
+	psmpool->allocated_size += psmpool->obj_size;
+
+	return (void *)ret;
+}
+
+static void simple_percpu_mempool_destory(struct simple_percpu_mempool *psmpool)
+{
+	int i;
+
+	for (i = 0; i < psmpool->areas; i++)
+		free_percpu((void *)psmpool->percpu_ptr[i]);
+
+	kfree(psmpool->percpu_ptr);
+	kfree(psmpool);
+}
+
+static inline bool is_simple_percpu_mempool_addr(
+		struct simple_percpu_mempool *psmpool, void *_addr)
+{
+	int i;
+	unsigned long addr, area_size, base;
+
+	addr = (unsigned long)_addr;
+	area_size = psmpool->obj_size * psmpool->objs_per_area;
+
+	for (i = 0; i < psmpool->areas; i++) {
+		base = psmpool->percpu_ptr[i];
+		if (addr >= base && addr < (base + area_size))
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * Examples of simple mempool usage
+ *
+ * #define nr_tgs atomic_read(&cpu_cgrp_subsys.root->nr_cgrps)
+ *
+ * DEFINE_RESERVE(sched_statistics,
+ *	bvt,
+ *	se,
+ *	nr_threads + nr_cpu_ids + (nr_tgs - 1) * nr_cpu_ids,
+ *	(nr_threads + nr_cpu_ids + (nr_tgs - 1) * nr_cpu_ids)*2)
+ *
+ * DEFINE_RESERVE(rq,		// struct rq
+ *	bvt,			// struct rq's bvt field
+ *	rq,			// name the mempool as rq_smp
+ *	nr_cpu_ids,		// we need exactly nr_cpu_ids objects
+ *	nr_cpu_ids);		// we alloc nr_cpu_ids objects before stop_machine
+ *
+ * DEFINE_RESERVE_PERCPU(task_struct,	// struct task_struct
+ *	percpu_var,		// task_struct's new percpu_var field
+ *	percpu_var,		// name the percpu mempool as percpu_var_smp
+ *	nr_threads + nr_cpu_ids,// we need exactly nr_threads + nr_cpu_ids objects
+ *	nr_threads + nr_cpu_ids)// we alloc nr_threads + nr_cpu_ids objects before stop_machine
+ */
+
+int sched_mempools_create(void)
+{
+	/*
+	 * Examples of mempools create
+	 *
+	 * int err;
+	 *
+	 * if ((err = create_mempool_se()))
+	 *	return err;
+	 *
+	 * if ((err = create_mempool_rq()))
+	 *	return err;
+	 *
+	 * if ((err = create_mempool_percpu_var()))
+	 *	return err;
+	 */
+
+	return 0;
+}
+
+void sched_mempools_destroy(void)
+{
+	/*
+	 * Examples of mempools destroy
+	 *
+	 * simple_mempool_destory(se_smp);
+	 * simple_mempool_destory(rq_smp);
+	 * simple_percpu_mempool_destory(percpu_var_smp);
+	 */
+}
+
+int recheck_smps(void)
+{
+	/*
+	 * Examples of mempools recheck
+	 *
+	 * int err;
+	 *
+	 * if ((err = recheck_mempool_rq()))
+	 *	return err;
+	 *
+	 * if ((err = recheck_mempool_se()))
+	 *	return err;
+	 *
+	 * if ((err = recheck_mempool_percpu_var()))
+	 *	return err;
+	 */
+
+	return 0;
+}
+
+void sched_alloc_extrapad(void)
+{
+	/* TODO: Exploit all CPUs */
+
+	/*
+	 * Examples of alloc extrapad
+	 *
+	 * struct task_struct *p, *t;
+	 * struct task_group *tg;
+	 * int cpu;
+	 *
+	 * for_each_possible_cpu(cpu) {
+	 *	cpu_rq(cpu)->bvt = alloc_rq_reserve();
+	 *	idle_task(cpu)->se.statistics.bvt = alloc_se_reserve();
+	 * }
+	 *
+	 * for_each_process_thread(p, t)
+	 *	t->se.statistics.bvt = alloc_se_reserve();
+	 *
+	 * list_for_each_entry_rcu(tg, &task_groups, list) {
+	 *	if (tg == &root_task_group || task_group_is_autogroup(tg))
+	 *		continue;
+	 *	for_each_possible_cpu(cpu)
+	 *		tg->se[cpu]->statistics.bvt = alloc_se_reserve();
+	 * }
+	 *
+	 * for_each_process_thread(p, t)
+	 *	t->percpu_var = alloc_percpu_var_reserve();
+	 */
+}
+
+void sched_free_extrapad(void)
+{
+	/* TODO: Exploit all CPUs */
+
+	/*
+	 * Examples of free extrapad
+	 *
+	 * struct task_struct *p, *t;
+	 * struct task_group *tg;
+	 * int cpu;
+	 *
+	 * for_each_possible_cpu(cpu) {
+	 *	release_se_reserve(&idle_task(cpu)->se.statistics);
+	 *	release_rq_reserve(cpu_rq(cpu));
+	 * }
+	 *
+	 * for_each_process_thread(p, t)
+	 *	release_se_reserve(&t->se.statistics);
+	 *
+	 * list_for_each_entry_rcu(tg, &task_groups, list) {
+	 *	if (tg == &root_task_group || task_group_is_autogroup(tg))
+	 *		continue;
+	 *	for_each_possible_cpu(cpu)
+	 *		release_se_reserve(&tg->se[cpu]->statistics);
+	 * }
+	 *
+	 * for_each_process_thread(p, t)
+	 *	release_percpu_var_reserve(t);
+	 */
+}
+#endif /* SCHEDMOD_MEMPOOL */
\ No newline at end of file
diff --git a/src/mempool.h b/src/mempool.h
index 9aaa9ae..f00dcad 100644
--- a/src/mempool.h
+++ b/src/mempool.h
@@ -33,146 +33,18 @@ struct simple_percpu_mempool {
 	unsigned int area_id;
 };
 
-static inline void *simple_mempool_alloc(struct simple_mempool *smpool)
-{
-	void *ret;
-
-	ret = smpool->alloc_addr;
-	smpool->alloc_addr += smpool->obj_size;
-
-	return ret;
-}
-
-static inline struct simple_mempool *simple_mempool_create(int obj_num, int obj_size)
-{
-	struct simple_mempool *smpool;
-
-	smpool = kzalloc_node(sizeof(*smpool), GFP_ATOMIC, 0);
-	if (!smpool)
-		return NULL;
-
-	smpool->vstart = vmalloc_node(obj_num * obj_size, 0);
-	if (!smpool->vstart) {
-		kfree(smpool);
-		return NULL;
-	}
-
-	smpool->alloc_addr = smpool->vstart;
-	smpool->vend = smpool->vstart + obj_num * obj_size;
-	smpool->obj_size = obj_size;
-	smpool->obj_num = obj_num;
-
-	return smpool;
-}
-
-static inline void simple_mempool_destory(struct simple_mempool *smpool)
-{
-	vfree(smpool->vstart);
-	kfree(smpool);
-}
-
-static struct simple_percpu_mempool *simple_percpu_mempool_create(int obj_num,
-		int obj_size)
-{
-	unsigned int areas, objs_per_area, cnt = 0;
-	struct simple_percpu_mempool *psmpool;
-	void *ptr;
-
-	psmpool = kzalloc_node(sizeof(*psmpool), GFP_ATOMIC, 0);
-	if (!psmpool)
-		return NULL;
-
-	/* Calculate how many percpu areas are required. */
-	objs_per_area = PCPU_MIN_UNIT_SIZE / obj_size;
-	areas = (obj_num + objs_per_area - 1) / objs_per_area;
-
-	psmpool->percpu_ptr =
-		kzalloc_node(sizeof(unsigned long) * areas, GFP_ATOMIC, 0);
-	if (!psmpool->percpu_ptr)
-		goto error;
-
-	for (cnt = 0; cnt < areas; cnt++) {
-		ptr = __alloc_percpu(PCPU_MIN_UNIT_SIZE, obj_size);
-		if (!ptr)
-			goto error;
-
-		psmpool->percpu_ptr[cnt] = (unsigned long)ptr;
-	}
-
-	psmpool->obj_size = obj_size;
-	psmpool->objs_per_area = objs_per_area;
-	psmpool->areas = areas;
-
-	return psmpool;
-
-error:
-	while (cnt > 0)
-		free_percpu((void *)psmpool->percpu_ptr[--cnt]);
-
-	kfree(psmpool->percpu_ptr);
-	kfree(psmpool);
-
-	return NULL;
-}
-
-static void *simple_percpu_mempool_alloc(struct simple_percpu_mempool *psmpool)
-{
-	unsigned long area_size, ret;
-
-	area_size = psmpool->obj_size * psmpool->objs_per_area;
-
-	if ((psmpool->allocated_size + psmpool->obj_size) > area_size) {
-		psmpool->area_id++;
-		psmpool->allocated_size = 0;
-	}
-
-	ret = psmpool->percpu_ptr[psmpool->area_id] + psmpool->allocated_size;
-	psmpool->allocated_size += psmpool->obj_size;
-
-	return (void *)ret;
-}
-
-static void simple_percpu_mempool_destory(struct simple_percpu_mempool *psmpool)
-{
-	int i;
-
-	for (i = 0; i < psmpool->areas; i++)
-		free_percpu((void *)psmpool->percpu_ptr[i]);
-
-	kfree(psmpool->percpu_ptr);
-	kfree(psmpool);
-}
-
-static inline bool is_simple_percpu_mempool_addr(
-		struct simple_percpu_mempool *psmpool, void *_addr)
-{
-	int i;
-	unsigned long addr, area_size, base;
-
-	addr = (unsigned long)_addr;
-	area_size = psmpool->obj_size * psmpool->objs_per_area;
-
-	for (i = 0; i < psmpool->areas; i++) {
-		base = psmpool->percpu_ptr[i];
-		if (addr >= base && addr < (base + area_size))
-			return true;
-	}
-
-	return false;
-}
-
 #define FIELD_TYPE(t, f) typeof(((struct t*)0)->f)
 #define FIELD_INDIRECT_TYPE(t, f) typeof(*((struct t*)0)->f)
 
 #define DEFINE_RESERVE(type, field, name, require, max)		\
 static struct simple_mempool *name##_smp = NULL;		\
-static void release_##name##_reserve(struct type *x)		\
+void release_##name##_reserve(struct type *x)			\
 {								\
 	if (!is_simple_mempool_addr(name##_smp, x->field))	\
 		kfree(x->field);				\
 	x->field = NULL;					\
 }								\
-static FIELD_TYPE(type, field) alloc_##name##_reserve(void)	\
+FIELD_TYPE(type, field) alloc_##name##_reserve(void)		\
 {								\
 	return simple_mempool_alloc(name##_smp);		\
 }								\
@@ -193,13 +65,13 @@
 
 #define DEFINE_RESERVE_PERCPU(type, field, name, require, max)		\
 static struct simple_percpu_mempool *name##_smp = NULL;		\
-static void release_##name##_reserve(struct type *x)			\
+void release_##name##_reserve(struct type *x)				\
 {									\
 	if (!is_simple_percpu_mempool_addr(name##_smp, x->field))	\
 		free_percpu((void *)x->field);				\
 	x->field = NULL;						\
 }									\
-static FIELD_TYPE(type, field) alloc_##name##_reserve(void)		\
+FIELD_TYPE(type, field) alloc_##name##_reserve(void)			\
 {									\
 	return simple_percpu_mempool_alloc(name##_smp);			\
 }									\
@@ -217,138 +89,11 @@ static int recheck_mempool_##name(void)		\
 		return -ENOMEM;					\
 	return 0;						\
 }
-
-/*
- * Examples of simple mempool usage
- *
- * #define nr_tgs atomic_read(&cpu_cgrp_subsys.root->nr_cgrps)
- *
- * DEFINE_RESERVE(sched_statistics,
- *	bvt,
- *	se,
- *	nr_threads + nr_cpu_ids + (nr_tgs - 1) * nr_cpu_ids,
- *	(nr_threads + nr_cpu_ids + (nr_tgs - 1) * nr_cpu_ids)*2)
- *
- * DEFINE_RESERVE(rq,		// struct rq
- *	bvt,			// struct rq's bvt field
- *	rq,			// name the mempool as rq_smp
- *	nr_cpu_ids,		// we need exactly nr_cpu_ids objects
- *	nr_cpu_ids);		// we alloc nr_cpu_ids objects before stop_machine
- *
- * DEFINE_RESERVE_PERCPU(task_struct,	// struct task_struct
- *	percpu_var,		// task_struct's new percpu_var feild
- *	percpu_var,		// name the percpu mempool as percpu_var_smp
- *	nr_threads + nr_cpu_ids,// we need exactly nr_cpu_ids objects
- *	nr_threads + nr_cpu_ids)// we alloc nr_cpu_ids objects before stop_machine
- */
-
-static int sched_mempools_create(void)
-{
-	int err;
-
-	/*
-	 * Examples of mempools create
-	 *
-	 * if ((err = create_mempool_se()))
-	 *	return err;
-	 *
-	 * if ((err = create_mempool_rq()))
-	 *	return err;
-	 *
-	 * if (err = create_mempool_percpu_var())
-	 *	return err;
-	 */
-
-	return 0;
-}
-
-static void sched_mempools_destroy(void)
-{
-	/*
-	 * Examples of mempools destroy
-	 *
-	 * simple_mempool_destory(se_smp);
-	 * simple_mempool_destory(rq_smp);
-	 * simple_percpu_mempool_destory(percpu_var_smp);
-	 */
-}
-
-static int recheck_smps(void)
-{
-	int err = -ENOMEM;
-
-	/*
-	 * Examples of mempools recheck
-	 *
-	 * if ((err = recheck_mempool_rq()))
-	 *	return err;
-	 *
-	 * if ((err = recheck_mempool_se()))
-	 *	return err;
-	 *
-	 * if ((err = recheck_mempool_percpu_var()))
-	 *	return err;
-	 */
-
-	return 0;
-}
-
-static void sched_alloc_extrapad(void)
-{
-	/* TODO: Exploit all CPUs */
-
-	/*
-	 * Examples of alloc extrapad
-	 *
-	 * struct task_struct *p, *t;
-	 * struct task_group *tg;
-	 * int cpu;
-	 *
-	 * for_each_possible_cpu(cpu) {
-	 *	cpu_rq(cpu)->bvt = alloc_rq_reserve();
-	 *	idle_task(cpu)->se.statistics.bvt = alloc_se_reserve();
-	 * }
-	 *
-	 * for_each_process_thread(p, t)
-	 *	t->se.statistics.bvt = alloc_se_reserve();
-	 *
-	 * list_for_each_entry_rcu(tg, &task_groups, list) {
-	 *	if (tg == &root_task_group || task_group_is_autogroup(tg))
-	 *		continue;
-	 *	for_each_possible_cpu(cpu)
-	 *		tg->se[cpu]->statistics.bvt = alloc_se_reserve();
-	 * }
-	 *
-	 * for_each_process_thread (p, t)
-	 *	t->percpu_var = alloc_percpu_var_reserve();
-	 */
-}
-
-static void sched_free_extrapad(void)
-{
-	/* TODO: Exploit all CPUs */
-
-	/*
-	 * Examples of free extrapad
-	 *
-	 * struct task_struct *p, *t;
-	 * struct task_group *tg;
-	 * int cpu;
-	 *
-	 * for_each_possible_cpu(cpu) {
-	 *	release_se_reserve(&idle_task(cpu)->se.statistics);
-	 *	release_rq_reserve(cpu_rq(cpu));
-	 * }
-	 *
-	 * for_each_process_thread (p, t)
-	 *	release_se_reserve(&t->se.statistics);
-	 *
-	 * list_for_each_entry_rcu(tg, &task_groups, list) {
-	 *	if (tg == &root_task_group || task_group_is_autogroup(tg))
-	 *		continue;
-	 *	for_each_possible_cpu(cpu)
-	 *		release_se_reserve(&tg->se[cpu]->statistics);
-	 * }
-	 *
-	 * for_each_process_thread(p, t)
-	 *	release_percpu_var_reserve(t);
-	 */
-}
-
+extern int recheck_smps(void);
+extern void sched_alloc_extrapad(void);
+extern void sched_free_extrapad(void);
+extern int sched_mempools_create(void);
+extern void sched_mempools_destroy(void);
 #else
 static inline int recheck_smps(void) { return 0; }
 static inline void sched_alloc_extrapad(void) { }
diff --git a/src/tainted.c b/src/tainted.c
new file mode 100644
index 0000000..7967040
--- /dev/null
+++ b/src/tainted.c
@@ -0,0 +1,37 @@
+#include "tainted.h"
+
+#undef TAINTED_FUNCTION
+#define TAINTED_FUNCTION(func,sympos)		\
+	{					\
+		.name = #func "," #sympos,	\
+		.kobj = NULL,			\
+	},
+
+struct tainted_function tainted_functions[] = {
+	#include "tainted_functions.h"
+	{ .name = NULL, .kobj = NULL }
+};
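+
+/*
+ * Each tainted function becomes an empty kobject directory named
+ * "<function>,<sympos>" under the per-module vmlinux directory, e.g.
+ * a hypothetical entry for __schedule with sympos 1 appears as
+ * ".../vmlinux/__schedule,1" in sysfs.
+ */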
#func "," #sympos, \ + .kobj = NULL, \ + }, + +struct tainted_function tainted_functions[] = { + #include "tainted_functions.h" + { .name = NULL, .kobj = NULL } +}; + +int register_tainted_functions(struct kobject *vmlinux_moddir) +{ + struct tainted_function *tf; + + for (tf = tainted_functions; tf->name; tf++) { + tf->kobj = kobject_create_and_add(tf->name, vmlinux_moddir); + if (!tf->kobj) + return -ENOMEM; + } + + return 0; +} + +void unregister_tainted_functions(void) +{ + struct tainted_function *tf; + + for (tf = tainted_functions; tf->name; tf++) { + if (!tf->kobj) + return; + kobject_put(tf->kobj); + } +} diff --git a/src/tainted.h b/src/tainted.h new file mode 100644 index 0000000..313d370 --- /dev/null +++ b/src/tainted.h @@ -0,0 +1,11 @@ +#include + +struct tainted_function { + char *name; + struct kobject *kobj; +}; + +extern struct tainted_function tainted_functions[]; + +extern int register_tainted_functions(struct kobject *); +extern void unregister_tainted_functions(void); \ No newline at end of file