
Commit 2cb3427

xairy authored and torvalds committed
arm64: kasan: simplify and inline MTE functions
This change provides a simpler implementation of mte_get_mem_tag(), mte_get_random_tag(), and mte_set_mem_tag_range().

Simplifications include removing system_supports_mte() checks, as these functions are only called from the KASAN runtime, which has already checked system_supports_mte(). Besides that, size and address alignment checks are removed from mte_set_mem_tag_range(), as KASAN now does those.

This change also moves these functions into the asm/mte-kasan.h header and implements mte_set_mem_tag_range() via inline assembly to avoid unnecessary function calls.

[vincenzo.frascino@arm.com: fix warning in mte_get_random_tag()]

Link: https://lkml.kernel.org/r/20210211152208.23811-1-vincenzo.frascino@arm.com
Link: https://lkml.kernel.org/r/a26121b294fdf76e369cb7a74351d1c03a908930.1612546384.git.andreyknvl@google.com
Co-developed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Marco Elver <elver@google.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
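For context, the arch_*() interface in asm/memory.h that the KASAN runtime goes through (referenced by the new comment in mte-kasan.h below) looks roughly like the following sketch. The macro names are quoted from memory of this kernel era and are not part of this diff:

/* arch/arm64/include/asm/memory.h (illustrative sketch, not part of this commit) */
#ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tagging()			mte_enable_kernel()
#define arch_init_tags(max_tag)			mte_init_tags(max_tag)
#define arch_get_random_tag()			mte_get_random_tag()
#define arch_get_mem_tag(addr)			mte_get_mem_tag(addr)
#define arch_set_mem_tag_range(addr, size, tag)	\
		mte_set_mem_tag_range((addr), (size), (tag))
#endif /* CONFIG_KASAN_HW_TAGS */

With the mte_*() helpers now static inline in asm/mte-kasan.h, each tag read, tag generation, and tag store issued through these macros compiles down to a bare ldg/irg/stg sequence instead of a call into arch/arm64/kernel/mte.c.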
1 parent cde8a7e commit 2cb3427

File tree

7 files changed: +60 −73 lines changed


arch/arm64/include/asm/cache.h

Lines changed: 0 additions & 1 deletion

@@ -6,7 +6,6 @@
 #define __ASM_CACHE_H

 #include <asm/cputype.h>
-#include <asm/mte-kasan.h>

 #define CTR_L1IP_SHIFT	14
 #define CTR_L1IP_MASK	3

arch/arm64/include/asm/kasan.h

Lines changed: 1 addition & 0 deletions

@@ -6,6 +6,7 @@

 #include <linux/linkage.h>
 #include <asm/memory.h>
+#include <asm/mte-kasan.h>
 #include <asm/pgtable-types.h>

 #define arch_kasan_set_tag(addr, tag)	__tag_set(addr, tag)

arch/arm64/include/asm/mte-def.h

Lines changed: 2 additions & 0 deletions

@@ -11,4 +11,6 @@
 #define MTE_TAG_SIZE	4
 #define MTE_TAG_MASK	GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)

+#define __MTE_PREAMBLE	ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
+
 #endif /* __ASM_MTE_DEF_H */

arch/arm64/include/asm/mte-kasan.h

Lines changed: 57 additions & 8 deletions

@@ -11,11 +11,14 @@

 #include <linux/types.h>

+#ifdef CONFIG_ARM64_MTE
+
 /*
- * The functions below are meant to be used only for the
- * KASAN_HW_TAGS interface defined in asm/memory.h.
+ * These functions are meant to be only used from KASAN runtime through
+ * the arch_*() interface defined in asm/memory.h.
+ * These functions don't include system_supports_mte() checks,
+ * as KASAN only calls them when MTE is supported and enabled.
  */
-#ifdef CONFIG_ARM64_MTE

 static inline u8 mte_get_ptr_tag(void *ptr)
 {
@@ -25,9 +28,54 @@ static inline u8 mte_get_ptr_tag(void *ptr)
 	return tag;
 }

-u8 mte_get_mem_tag(void *addr);
-u8 mte_get_random_tag(void);
-void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
+/* Get allocation tag for the address. */
+static inline u8 mte_get_mem_tag(void *addr)
+{
+	asm(__MTE_PREAMBLE "ldg %0, [%0]"
+	    : "+r" (addr));
+
+	return mte_get_ptr_tag(addr);
+}
+
+/* Generate a random tag. */
+static inline u8 mte_get_random_tag(void)
+{
+	void *addr;
+
+	asm(__MTE_PREAMBLE "irg %0, %0"
+	    : "=r" (addr));
+
+	return mte_get_ptr_tag(addr);
+}
+
+/*
+ * Assign allocation tags for a region of memory based on the pointer tag.
+ * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
+ * size must be non-zero and MTE_GRANULE_SIZE aligned.
+ */
+static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+	u64 curr, end;
+
+	if (!size)
+		return;
+
+	curr = (u64)__tag_set(addr, tag);
+	end = curr + size;
+
+	do {
+		/*
+		 * 'asm volatile' is required to prevent the compiler to move
+		 * the statement outside of the loop.
+		 */
+		asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
+			     :
+			     : "r" (curr)
+			     : "memory");
+
+		curr += MTE_GRANULE_SIZE;
+	} while (curr != end);
+}

 void mte_enable_kernel(void);
 void mte_init_tags(u64 max_tag);
@@ -46,13 +94,14 @@ static inline u8 mte_get_mem_tag(void *addr)
 {
 	return 0xFF;
 }
+
 static inline u8 mte_get_random_tag(void)
 {
 	return 0xFF;
 }
-static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+
+static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
 {
-	return addr;
 }

 static inline void mte_enable_kernel(void)
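For readers skimming the new header, a minimal, hypothetical usage sketch of the three inlined helpers follows. It is not part of this commit; the caller name and the 64-byte size are made up, and in the real kernel these helpers are reached only through the KASAN arch_*() macros rather than called directly:

/* Hypothetical caller, for illustration only (not in this commit). */
#include <linux/bug.h>
#include <asm/mte-kasan.h>

static void example_tag_object(void *obj)
{
	/* irg: generate a random allocation tag. */
	u8 tag = mte_get_random_tag();

	/*
	 * stg loop: obj must be MTE_GRANULE_SIZE aligned and the size
	 * (64 bytes here) a non-zero multiple of MTE_GRANULE_SIZE, as
	 * the header comment requires; those checks now live in KASAN.
	 */
	mte_set_mem_tag_range(obj, 64, tag);

	/* ldg: the tag just stored can be read back from memory. */
	WARN_ON(mte_get_mem_tag(obj) != tag);
}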

arch/arm64/include/asm/mte.h

Lines changed: 0 additions & 2 deletions

@@ -8,8 +8,6 @@
 #include <asm/compiler.h>
 #include <asm/mte-def.h>

-#define __MTE_PREAMBLE	ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
-
 #ifndef __ASSEMBLY__

 #include <linux/bitfield.h>

arch/arm64/kernel/mte.c

Lines changed: 0 additions & 46 deletions

@@ -19,7 +19,6 @@
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
-#include <asm/mte-kasan.h>
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>

@@ -88,51 +87,6 @@ int memcmp_pages(struct page *page1, struct page *page2)
 	return ret;
 }

-u8 mte_get_mem_tag(void *addr)
-{
-	if (!system_supports_mte())
-		return 0xFF;
-
-	asm(__MTE_PREAMBLE "ldg %0, [%0]"
-	    : "+r" (addr));
-
-	return mte_get_ptr_tag(addr);
-}
-
-u8 mte_get_random_tag(void)
-{
-	void *addr;
-
-	if (!system_supports_mte())
-		return 0xFF;
-
-	asm(__MTE_PREAMBLE "irg %0, %0"
-	    : "+r" (addr));
-
-	return mte_get_ptr_tag(addr);
-}
-
-void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
-{
-	void *ptr = addr;
-
-	if ((!system_supports_mte()) || (size == 0))
-		return addr;
-
-	/* Make sure that size is MTE granule aligned. */
-	WARN_ON(size & (MTE_GRANULE_SIZE - 1));
-
-	/* Make sure that the address is MTE granule aligned. */
-	WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
-
-	tag = 0xF0 | tag;
-	ptr = (void *)__tag_set(ptr, tag);
-
-	mte_assign_mem_tag_range(ptr, size);
-
-	return ptr;
-}
-
 void mte_init_tags(u64 max_tag)
 {
 	static bool gcr_kernel_excl_initialized;

arch/arm64/lib/mte.S

Lines changed: 0 additions & 16 deletions

@@ -149,19 +149,3 @@ SYM_FUNC_START(mte_restore_page_tags)

	ret
SYM_FUNC_END(mte_restore_page_tags)
-
-/*
- * Assign allocation tags for a region of memory based on the pointer tag
- * x0 - source pointer
- * x1 - size
- *
- * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
- * size must be non-zero and MTE_GRANULE_SIZE aligned.
- */
-SYM_FUNC_START(mte_assign_mem_tag_range)
-1:	stg	x0, [x0]
-	add	x0, x0, #MTE_GRANULE_SIZE
-	subs	x1, x1, #MTE_GRANULE_SIZE
-	b.gt	1b
-	ret
-SYM_FUNC_END(mte_assign_mem_tag_range)
