Commit fcff5f9

Merge tag 'asm-generic-fixes-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic
Pull asm-generic fixes from Arnd Bergmann:

  "These are minor fixes to address false-positive build warnings: Some
   of the less common I/O accessors are missing __force casts and cause
   sparse warnings for their implied byteswap, and a recent change to
   __generic_cmpxchg_local() causes a warning about constant integer
   truncation"

* tag 'asm-generic-fixes-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic:
  asm-generic: avoid __generic_cmpxchg_local warnings
  asm-generic/io.h: suppress endianness warnings for relaxed accessors
  asm-generic/io.h: suppress endianness warnings for readq() and writeq()

Parents: 99ddf22 + 656e900

File tree: 4 files changed, +19 -19 lines

  include/asm-generic/atomic.h
  include/asm-generic/cmpxchg-local.h
  include/asm-generic/cmpxchg.h
  include/asm-generic/io.h

include/asm-generic/atomic.h (2 additions, 2 deletions)

@@ -130,7 +130,7 @@ ATOMIC_OP(xor, ^)
 #define arch_atomic_read(v) READ_ONCE((v)->counter)
 #define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
-#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v)))
-#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new)))
+#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (u32)(v)))
+#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (u32)(old), (u32)(new)))
 
 #endif /* __ASM_GENERIC_ATOMIC_H */
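
For context, a hedged plain-C illustration (not the kernel macros) of what the added (u32) casts buy: a negative argument such as -1 is sign-extended once it becomes the unsigned long that the size-generic cmpxchg machinery works on, so the narrower branches of its size switch would have to truncate it; narrowing to the 32-bit counter width at the atomic wrapper makes that truncation explicit and up front.

/*
 * Hedged illustration, plain C rather than the kernel macros: how a
 * negative 'old' widens on LP64, and what narrowing it to 32 bits
 * first changes. Compiles and runs as-is.
 */
#include <stdio.h>

int main(void)
{
        int old = -1;
        unsigned long widened = (unsigned long)old;  /* 0xffffffffffffffff on LP64 */
        unsigned long narrowed = (unsigned int)old;  /* 0x00000000ffffffff */

        printf("widened:  %#lx\nnarrowed: %#lx\n", widened, narrowed);
        return 0;
}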

include/asm-generic/cmpxchg-local.h (6 additions, 6 deletions)

@@ -26,16 +26,16 @@ static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
 	raw_local_irq_save(flags);
 	switch (size) {
 	case 1: prev = *(u8 *)ptr;
-		if (prev == (u8)old)
-			*(u8 *)ptr = (u8)new;
+		if (prev == (old & 0xffu))
+			*(u8 *)ptr = (new & 0xffu);
 		break;
 	case 2: prev = *(u16 *)ptr;
-		if (prev == (u16)old)
-			*(u16 *)ptr = (u16)new;
+		if (prev == (old & 0xffffu))
+			*(u16 *)ptr = (new & 0xffffu);
 		break;
 	case 4: prev = *(u32 *)ptr;
-		if (prev == (u32)old)
-			*(u32 *)ptr = (u32)new;
+		if (prev == (old & 0xffffffffu))
+			*(u32 *)ptr = (new & 0xffffffffu);
 		break;
 	case 8: prev = *(u64 *)ptr;
 		if (prev == old)
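
The masks perform the same narrowing as the casts they replace, but as arithmetic on the full-width value rather than as a cast, which is the pattern that produced the constant-truncation warning. A standalone sketch with demo-only names (byte_matches_cast, byte_matches_mask) of the two forms side by side:

/*
 * Hedged sketch, demo-only names: 'old' stands in for the unsigned long
 * argument of __generic_cmpxchg_local(). Both helpers compare the same
 * low byte; the first goes through a narrowing cast, the second through
 * a mask that never leaves the wider type.
 */
typedef unsigned char u8;

static inline int byte_matches_cast(const volatile u8 *ptr, unsigned long old)
{
        return *ptr == (u8)old;         /* narrowing cast of 'old' */
}

static inline int byte_matches_mask(const volatile u8 *ptr, unsigned long old)
{
        return *ptr == (old & 0xffu);   /* same comparison, no cast */
}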

include/asm-generic/cmpxchg.h (3 additions, 3 deletions)

@@ -32,7 +32,7 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 #else
 		local_irq_save(flags);
 		ret = *(volatile u8 *)ptr;
-		*(volatile u8 *)ptr = x;
+		*(volatile u8 *)ptr = (x & 0xffu);
 		local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u8 */
@@ -43,7 +43,7 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 #else
 		local_irq_save(flags);
 		ret = *(volatile u16 *)ptr;
-		*(volatile u16 *)ptr = x;
+		*(volatile u16 *)ptr = (x & 0xffffu);
 		local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u16 */
@@ -54,7 +54,7 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 #else
 		local_irq_save(flags);
 		ret = *(volatile u32 *)ptr;
-		*(volatile u32 *)ptr = x;
+		*(volatile u32 *)ptr = (x & 0xffffffffu);
 		local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u32 */

include/asm-generic/io.h (8 additions, 8 deletions)

@@ -236,7 +236,7 @@ static inline u64 readq(const volatile void __iomem *addr)
 
 	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
 	__io_br();
-	val = __le64_to_cpu(__raw_readq(addr));
+	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
 	__io_ar(val);
 	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
 	return val;
@@ -287,7 +287,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
 {
 	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
 	__io_bw();
-	__raw_writeq(__cpu_to_le64(value), addr);
+	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
 	__io_aw();
 	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
 }
@@ -319,7 +319,7 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
 	u16 val;
 
 	log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
-	val = __le16_to_cpu(__raw_readw(addr));
+	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
 	log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
 	return val;
 }
@@ -332,7 +332,7 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
 	u32 val;
 
 	log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
-	val = __le32_to_cpu(__raw_readl(addr));
+	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
 	log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
 	return val;
 }
@@ -345,7 +345,7 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
 	u64 val;
 
 	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
-	val = __le64_to_cpu(__raw_readq(addr));
+	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
 	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
 	return val;
 }
@@ -366,7 +366,7 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
 static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
 {
 	log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
-	__raw_writew(cpu_to_le16(value), addr);
+	__raw_writew((u16 __force)cpu_to_le16(value), addr);
 	log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
@@ -376,7 +376,7 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
 static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
 {
 	log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
-	__raw_writel(__cpu_to_le32(value), addr);
+	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
 	log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
@@ -386,7 +386,7 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
 static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
 {
 	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
-	__raw_writeq(__cpu_to_le64(value), addr);
+	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
 	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
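
For the io.h half of the series, a minimal self-contained sketch of the sparse mechanism involved, assuming sparse's __bitwise/__force attributes and using demo-only names (le64_demo, le64_to_cpu_demo, readq_demo): the raw MMIO value and its little-endian view are the same bits, and the __force cast only tells sparse that crossing between the plain and the restricted (endian-annotated) type is deliberate, so the false-positive byteswap warning goes away without changing the generated code.

/*
 * Hedged sketch, demo-only names, not the kernel's definitions. Under
 * sparse (__CHECKER__ defined) the typedef becomes a restricted bitwise
 * type; under a normal compiler the annotations expand to nothing.
 */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned long long u64;
typedef u64 __bitwise le64_demo;        /* stand-in for the kernel's __le64 */

static inline u64 le64_to_cpu_demo(le64_demo v)
{
        return (__force u64)v;          /* assume a little-endian host for the demo */
}

static inline u64 readq_demo(const volatile void *addr)
{
        u64 raw = *(const volatile u64 *)addr;  /* plays the role of __raw_readq() */

        /*
         * Passing 'raw' directly would make sparse complain about mixing
         * the plain and the restricted type; the __force cast marks the
         * reinterpretation as intentional, mirroring the io.h change.
         */
        return le64_to_cpu_demo((__force le64_demo)raw);
}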
