
Commit 7b3708d

weiny2 authored and sfrothwell committed
{x86,powerpc,microblaze}/kmap: move preempt disable
During this kmap() conversion series we must maintain bisect-ability. To do this, kmap_atomic_prot() in x86, powerpc, and microblaze needs to remain functional. Create a temporary inline version of kmap_atomic_prot() within these architectures so we can rework their kmap_atomic() calls and then lift kmap_atomic_prot() to the core.

Link: http://lkml.kernel.org/r/20200507150004.1423069-6-ira.weiny@intel.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christian König <christian.koenig@amd.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Helge Deller <deller@gmx.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
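For readers following the series, here is a usage sketch (illustrative only, not part of this commit) of the caller-visible contract that must survive the move; the helper name zero_highpage_prot() is hypothetical, while kmap_atomic_prot() and __kunmap_atomic() are the real entry points touched here:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical caller: whether preemption and page faults are disabled
 * in the arch C file (before this patch) or in the new inline header
 * wrapper (after), the contract is identical; the mapping is per-CPU
 * and the caller must not sleep until it unmaps. */
static void zero_highpage_prot(struct page *page, pgprot_t prot)
{
	void *vaddr = kmap_atomic_prot(page, prot);	/* disables preemption + page faults */

	memset(vaddr, 0, PAGE_SIZE);			/* short, non-sleeping access only */
	__kunmap_atomic(vaddr);				/* unmaps, re-enables both */
}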
1 parent 56e635a commit 7b3708d

6 files changed (+36 additions, -26 deletions)

arch/microblaze/include/asm/highmem.h

Lines changed: 10 additions & 1 deletion
@@ -51,7 +51,16 @@ extern pte_t *pkmap_page_table;
 #define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
-extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+	preempt_disable();
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+
+	return kmap_atomic_high_prot(page, prot);
+}
 extern void __kunmap_atomic(void *kvaddr);
 
 static inline void *kmap_atomic(struct page *page)
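For context, the kmap_atomic() declaration in the trailing context line above is (per the kernel sources of this era, not shown in this diff) a thin wrapper that passes the default protection, which is why making kmap_atomic_prot() an inline that disables preemption and page faults also covers the plain kmap_atomic() path, roughly:

/* Sketch of the surrounding wrapper, assuming the kmap_prot default
 * these architectures used at the time: */
static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}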

arch/microblaze/mm/highmem.c

Lines changed: 2 additions & 8 deletions
@@ -32,18 +32,12 @@
  */
 #include <asm/tlbflush.h>
 
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 
 	unsigned long vaddr;
 	int idx, type;
 
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
-
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -55,7 +49,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 
 	return (void *) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic_prot);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
 
 void __kunmap_atomic(void *kvaddr)
 {

arch/powerpc/include/asm/highmem.h

Lines changed: 10 additions & 1 deletion
@@ -59,7 +59,16 @@ extern pte_t *pkmap_page_table;
 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
-extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+	preempt_disable();
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+
+	return kmap_atomic_high_prot(page, prot);
+}
 extern void __kunmap_atomic(void *kvaddr);
 
 static inline void *kmap_atomic(struct page *page)

arch/powerpc/mm/highmem.c

Lines changed: 2 additions & 7 deletions
@@ -30,16 +30,11 @@
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 	unsigned long vaddr;
 	int idx, type;
 
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -49,7 +44,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic_prot);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
 
 void __kunmap_atomic(void *kvaddr)
 {

arch/x86/include/asm/highmem.h

Lines changed: 10 additions & 1 deletion
@@ -58,7 +58,16 @@ extern unsigned long highstart_pfn, highend_pfn;
 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
-void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+	preempt_disable();
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+
+	return kmap_atomic_high_prot(page, prot);
+}
 void *kmap_atomic(struct page *page);
 void __kunmap_atomic(void *kvaddr);
 void *kmap_atomic_pfn(unsigned long pfn);

arch/x86/mm/highmem_32.c

Lines changed: 2 additions & 8 deletions
@@ -12,17 +12,11 @@
  * However when holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 	unsigned long vaddr;
 	int idx, type;
 
-	preempt_disable();
-	pagefault_disable();
-
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -32,7 +26,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic_prot);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
 
 void *kmap_atomic(struct page *page)
 {
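As the commit message notes, these per-architecture inline copies are temporary scaffolding for bisectability. A hedged sketch of the intended end state (a later patch in the series, not this one): once each architecture exports kmap_atomic_high_prot(), the three identical wrappers can be lifted into one core definition, e.g. in include/linux/highmem.h:

/* Sketch only; the exact placement and config guards are assumptions. */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	return kmap_atomic_high_prot(page, prot);
}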
