This repository has been archived by the owner on Jul 16, 2024. It is now read-only.

Livepatch backporting from Openeuler-kernel #141

Open

wants to merge 52 commits into linux-5.10.y

Changes from 1 commit

Commits (52)
2ac5206
livepatch/core: Allow implementation without ftrace
May 29, 2021
2538132
livepatch/core: Restrict livepatch patched/unpatched when plant kprobe
May 29, 2021
5267334
livepatch/core: Split livepatch consistency
May 29, 2021
3627ca4
livepatch/core: Supprt load and unload hooks
May 29, 2021
5fab5bb
livepatch/core: Support jump_label
May 29, 2021
607e697
livepatch/arm64: Support livepatch without ftrace
May 29, 2021
bb2fb61
livepatch/arm64: Fix func size less than limit
May 29, 2021
6b8b573
livepatch/arm64: Fix current backtracking in klp_check_calltrace
May 29, 2021
edb391b
livepatch/arm64: check active func in consistency stack checking
May 29, 2021
0ec4bd6
livepatch/core: Add livepatch consistency depends
May 29, 2021
889355b
arm/module: Use plt section indices for relocations
May 29, 2021
88b2c07
livepatch/core: Add support for arm for klp relocation
May 29, 2021
9e359f7
livepatch/arm: Support livepatch without ftrace
libin2015 May 29, 2021
62cbf70
livepatch/arm: Fix current backtracking in klp_check_calltrace
May 29, 2021
d0cabc3
livepatch/arm: Add support for livepatch plt
May 29, 2021
a86eca7
livepatch/arm: Check active func in consistency stack checking
May 29, 2021
12474d8
livepatch/core: Revert module_enable_ro and module_disable_ro
May 29, 2021
8958ee7
livepatch/x86: support livepatch without ftrace
May 29, 2021
ee6ddb6
livepatch/x86: check active func in consistency stack checking
May 29, 2021
48a998d
livepatch/ppc32: Support livepatch without ftrace
libin2015 May 29, 2021
1e1277a
livepatch/ppc32: Add support for longjump
May 29, 2021
e20f0a5
livepatch/ppc32: Fix func size less than limit
May 29, 2021
ec82dac
livepatch/ppc32: Fix current backtracking in klp_check_calltrace
May 29, 2021
006790f
livepatch/ppc32: Check active func in consistency stack checking
May 29, 2021
40c32d1
livepatch/ppc32: Ignore the first frame when checking stack
shaolexi May 29, 2021
2ce1a73
livepatch/ppc64: Implement livepatch without ftrace for ppc64be
May 29, 2021
7bf8706
livepatch/ppc64: Support use func_descr for new_func
May 29, 2021
d9b284c
livepatch/ppc64: Check active func in consistency stack checking
May 29, 2021
3f714e9
livepatch/ppc64: Implement per func_node livepatch trampoline
May 29, 2021
5ee6c47
livepatch/ppc64: Use module_alloc to alloc func_node
May 29, 2021
315f9cb
livepatch/ppc64: Make sure caller function in stack
May 29, 2021
d0e423a
livepatch/ppc64: Ignore the first frame when checking stack
shaolexi May 29, 2021
7846fcf
livepatch/ppc64: Sample testcase fix ppc64
May 29, 2021
f8e6bcd
livepatch/ppc64: Enable livepatch without ftrace
May 29, 2021
520deb6
livepatch/core: Support function force patched/unpatched
May 29, 2021
e8ef67e
livepatch: put memory alloc and free out stop machine
May 31, 2021
2501cec
livepatch: fix unload hook could not be excuted
Jun 23, 2021
a4ab930
livepatch: Add state describe for force
Oct 19, 2021
e4f028c
livepatch: checks only if the replaced instruction is on the stack
Oct 19, 2021
108cf88
livepatch/arm64: only check stack top
Oct 19, 2021
1e9df51
livepatch/arm: only check stack top
Oct 19, 2021
ff00659
livepatch/ppc32: only check stack top
Oct 19, 2021
38aa662
livepatch/ppc64: only check stack top
Oct 19, 2021
5fef18a
livepatch/x86: only check stack top
Oct 19, 2021
fbcdf89
livepatch: move arch_klp_mem_recycle after the return value judgment
Oct 19, 2021
4321455
livepatch: Fix compile warnning
Oct 19, 2021
32b0d02
livepatch: Add klp_{register,unregister}_patch for stop_machine model
Oct 19, 2021
96b009e
livepatch: Adapt livepatch-sample for stop_machine model
Oct 19, 2021
de27655
livepatch: Check whole stack when CONFIG_PREEMPT is set
Nov 15, 2021
cc1ffe4
livepatch: Fix crash when access the global variable in hook
Nov 26, 2021
048eaa3
livepatch/arm: fix incorrect stack detection
Nov 30, 2021
a59ce34
altra: enable CONFIG_LIVEPATCH
bobolmw Jan 24, 2022
livepatch/arm: only check stack top
hulk inclusion
category: feature
bugzilla: 119440 https://gitee.com/openeuler/kernel/issues/I4DDEL

--------------------------------

Enable stack optimize on arm.

Signed-off-by: Ye Weihua <yeweihua4@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Ye Weihua authored and bobolmw committed Jan 16, 2022
commit 1e9df51a01be1831d9b00d44be0f76aa3a9284d0
193 changes: 169 additions & 24 deletions arch/arm/kernel/livepatch.c
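The commit message above is terse, so here is a stand-alone illustration of what "only check stack top" buys. It is an editor's sketch, not code from the patch: the function names, addresses and backtrace are invented, and in the kernel the two candidate lists are built by klp_check_activeness_func() while the full walk is done by walk_stackframe().

/*
 * Editor's sketch (not from the patch): why a "no jump" function only needs
 * the stack top checked.  A return address always points just after a BL/BLX;
 * if the range being overwritten contains no such call, no frame other than
 * the innermost one can have a pc inside it, so comparing each sleeping
 * task's saved pc is enough.  All names and addresses below are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct func_range {
	unsigned long addr;
	unsigned long size;
	const char *name;
};

static bool pc_in(const struct func_range *f, unsigned long pc)
{
	return pc >= f->addr && pc < f->addr + f->size;
}

int main(void)
{
	/* Patched function with no BL/BLX in the overwritten range:
	 * corresponds to the nojump_funcs list in the diff. */
	struct func_range nojump = { 0xc0100000UL, 0x60, "nojump_func" };
	/* Patched function that does call out: corresponds to other_funcs
	 * and still requires a full stack walk. */
	struct func_range other = { 0xc0200000UL, 0x80, "calls_out_func" };
	/* A made-up backtrace of a sleeping task, innermost frame first. */
	unsigned long backtrace[] = { 0xc0300010UL, 0xc0200024UL, 0xc0400008UL };
	unsigned int i;

	/* Stack-top check: one comparison per task. */
	printf("%s live at stack top? %d\n",
	       nojump.name, pc_in(&nojump, backtrace[0]));

	/* Full walk, still needed for functions that may sit deeper. */
	for (i = 0; i < sizeof(backtrace) / sizeof(backtrace[0]); i++)
		if (pc_in(&other, backtrace[i]))
			printf("%s found in frame %u -> patching unsafe\n",
			       other.name, i);

	return 0;
}

In the diff below, check_func_list() plays the role of pc_in() over a linked list, and klp_check_jump_func() is the walk_stackframe() callback used for the other_funcs case.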
@@ -40,18 +40,21 @@
#ifdef CONFIG_ARM_MODULE_PLTS
#define LJMP_INSN_SIZE 3
#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * ARM_INSN_SIZE)
#define CHECK_JUMP_RANGE LJMP_INSN_SIZE

#else
#define MAX_SIZE_TO_CHECK ARM_INSN_SIZE
#define CHECK_JUMP_RANGE 1
#endif

struct klp_func_node {
struct list_head node;
struct list_head func_stack;
void *old_func;
#ifdef CONFIG_ARM_MODULE_PLTS
u32 old_insns[LJMP_INSN_SIZE];
#else
u32 old_insn;
#endif
};

@@ -70,9 +73,38 @@ static struct klp_func_node *klp_find_func_node(void *old_func)
}

#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
/*
* The instruction set on arm is A32.
* The instruction of BL is xxxx1011xxxxxxxxxxxxxxxxxxxxxxxx, and first four
* bits could not be 1111.
* The instruction of BLX(immediate) is 1111101xxxxxxxxxxxxxxxxxxxxxxxxx.
* The instruction of BLX(register) is xxxx00010010xxxxxxxxxxxx0011xxxx, and
* first four bits could not be 1111.
*/
static bool is_jump_insn(u32 insn)
{
if (((insn & 0x0f000000) == 0x0b000000) &&
((insn & 0xf0000000) != 0xf0000000))
return true;
if ((insn & 0xfe000000) == 0xfa000000)
return true;
if (((insn & 0x0ff000f0) == 0x01200030) &&
((insn & 0xf0000000) != 0xf0000000))
return true;
return false;
}

struct klp_func_list {
struct klp_func_list *next;
unsigned long func_addr;
unsigned long func_size;
const char *func_name;
int force;
};

struct walk_stackframe_args {
struct klp_patch *patch;
int enable;
struct klp_func_list *other_funcs;
int ret;
};

@@ -96,22 +128,59 @@ static inline int klp_compare_address(unsigned long pc, unsigned long func_addr,
return 0;
}

static bool check_jump_insn(unsigned long func_addr)
{
unsigned long i;
u32 *insn = (u32*)func_addr;

for (i = 0; i < CHECK_JUMP_RANGE; i++) {
if (is_jump_insn(*insn)) {
return true;
}
insn++;
}
return false;
}

static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
unsigned long func_addr, unsigned long func_size, const char *func_name,
int force)
{
if (*func == NULL) {
*funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC);
if (!(*funcs))
return -ENOMEM;
*func = *funcs;
} else {
(*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs),
GFP_ATOMIC);
if (!(*func)->next)
return -ENOMEM;
*func = (*func)->next;
}
(*func)->func_addr = func_addr;
(*func)->func_size = func_size;
(*func)->func_name = func_name;
(*func)->force = force;
(*func)->next = NULL;
return 0;
}

static int klp_check_activeness_func(struct klp_patch *patch, int enable,
struct klp_func_list **nojump_funcs,
struct klp_func_list **other_funcs)
{
int ret;
struct klp_object *obj;
struct klp_func_node *func_node;
struct klp_func *func;
unsigned long func_addr, func_size;
struct klp_func_list *pnjump = NULL;
struct klp_func_list *pother = NULL;

for (obj = patch->objs; obj->funcs; obj++) {
for (func = obj->funcs; func->old_name; func++) {
if (enable) {
if (func->force == KLP_ENFORCEMENT)
continue;
/*
@@ -140,34 +209,102 @@ static int klp_check_activeness_func(struct stackframe *frame, void *data)
func_addr = (unsigned long)prev->new_func;
func_size = prev->new_size;
}
if ((func->force == KLP_STACK_OPTIMIZE) &&
!check_jump_insn(func_addr))
ret = add_func_to_list(nojump_funcs, &pnjump,
func_addr, func_size,
func->old_name, func->force);
else
ret = add_func_to_list(other_funcs, &pother,
func_addr, func_size,
func->old_name, func->force);
if (ret)
return ret;
} else {
/*
* When disable, check for the previously
* patched function and the function itself
* which to be unpatched.
*/
func_node = klp_find_func_node(func->old_func);
if (!func_node)
return -EINVAL;
if (list_is_singular(&func_node->func_stack)) {
func_addr = (unsigned long)func->old_func;
func_size = func->old_size;
} else {
struct klp_func *prev;

prev = list_first_or_null_rcu(
&func_node->func_stack,
struct klp_func, stack_node);
func_addr = (unsigned long)prev->new_func;
func_size = prev->new_size;
}
ret = add_func_to_list(other_funcs, &pother,
func_addr, func_size,
func->old_name, 0);
if (ret)
return ret;
func_addr = (unsigned long)func->new_func;
func_size = func->new_size;
ret = add_func_to_list(other_funcs, &pother,
func_addr, func_size,
func->old_name, 0);
if (ret)
return ret;
}
}
}
return 0;
}

static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc)
{
while (funcs != NULL) {
*ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name,
klp_size_to_check(funcs->func_size, funcs->force));
if (*ret) {
return false;
}
funcs = funcs->next;
}
return true;
}

static int klp_check_jump_func(struct stackframe *frame, void *data)
{
struct walk_stackframe_args *args = data;
struct klp_func_list *other_funcs = args->other_funcs;

return check_func_list(other_funcs, &args->ret, frame->pc);
}

static void free_list(struct klp_func_list **funcs)
{
struct klp_func_list *p;

while (*funcs != NULL) {
p = *funcs;
*funcs = (*funcs)->next;
kfree(p);
}
}

int klp_check_calltrace(struct klp_patch *patch, int enable)
{
struct task_struct *g, *t;
struct stackframe frame;
int ret = 0;
struct klp_func_list *nojump_funcs = NULL;
struct klp_func_list *other_funcs = NULL;

ret = klp_check_activeness_func(patch, enable, &nojump_funcs, &other_funcs);
if (ret)
goto out;

struct walk_stackframe_args args = {
.patch = patch,
.enable = enable,
.other_funcs = other_funcs,
.ret = 0
};

@@ -194,17 +331,25 @@ int klp_check_calltrace(struct klp_patch *patch, int enable)
frame.lr = 0; /* recovered from the stack */
frame.pc = thread_saved_pc(t);
}

if (!check_func_list(nojump_funcs, &ret, frame.pc)) {
pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
show_stack(t, NULL, KERN_INFO);
goto out;
}
if (other_funcs != NULL) {
walk_stackframe(&frame, klp_check_jump_func, &args);
if (args.ret) {
ret = args.ret;
pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
show_stack(t, NULL, KERN_INFO);
goto out;
}
}
}

out:
free_list(&nojump_funcs);
free_list(&other_funcs);
return ret;
}
#endif
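To sanity-check the is_jump_insn() masks added above, the function can be lifted into a small user-space harness. The body below is copied from the hunk (with u32 replaced by uint32_t); the three sample opcodes are ordinary A32 encodings (BL, BLX r3, MOV r0, r0) chosen for illustration and are not taken from the patch.

/* Editor's harness: exercises the is_jump_insn() masks from the diff above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_jump_insn(uint32_t insn)
{
	/* BL: cond 1011 imm24, with cond != 1111 */
	if (((insn & 0x0f000000) == 0x0b000000) &&
	    ((insn & 0xf0000000) != 0xf0000000))
		return true;
	/* BLX (immediate): 1111 101x imm24 */
	if ((insn & 0xfe000000) == 0xfa000000)
		return true;
	/* BLX (register): cond 0001 0010 .... 0011 Rm, with cond != 1111 */
	if (((insn & 0x0ff000f0) == 0x01200030) &&
	    ((insn & 0xf0000000) != 0xf0000000))
		return true;
	return false;
}

int main(void)
{
	printf("bl  0x%08x -> %d\n", 0xeb000000u, is_jump_insn(0xeb000000u)); /* BL #0      -> 1 */
	printf("blx 0x%08x -> %d\n", 0xe12fff33u, is_jump_insn(0xe12fff33u)); /* BLX r3     -> 1 */
	printf("mov 0x%08x -> %d\n", 0xe1a00000u, is_jump_insn(0xe1a00000u)); /* MOV r0, r0 -> 0 */
	return 0;
}

Compiled with e.g. gcc -o jump_check jump_check.c, it prints 1, 1, 0, matching the BL/BLX cases described in the comment above is_jump_insn() in the diff.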