Disintegrate asm/system.h for Sparc
Disintegrate asm/system.h for Sparc.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: sparclinux@vger.kernel.org
dhowells committed Mar 28, 2012
1 parent e839ca5 commit d550bbd
Showing 84 changed files with 684 additions and 669 deletions.
2 changes: 1 addition & 1 deletion arch/sparc/include/asm/atomic_32.h
@@ -13,9 +13,9 @@

#include <linux/types.h>

#include <asm/cmpxchg.h>
#include <asm-generic/atomic64.h>

#include <asm/system.h>

#define ATOMIC_INIT(i) { (i) }

3 changes: 1 addition & 2 deletions arch/sparc/include/asm/atomic_64.h
@@ -8,7 +8,7 @@
#define __ARCH_SPARC64_ATOMIC__

#include <linux/types.h>
#include <asm/system.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
@@ -85,7 +85,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}


#define atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
1 change: 0 additions & 1 deletion arch/sparc/include/asm/auxio_32.h
@@ -6,7 +6,6 @@
#ifndef _SPARC_AUXIO_H
#define _SPARC_AUXIO_H

#include <asm/system.h>
#include <asm/vaddrs.h>

/* This register is an unsigned char in IO space. It does two things.
8 changes: 8 additions & 0 deletions arch/sparc/include/asm/barrier.h
@@ -0,0 +1,8 @@
#ifndef ___ASM_SPARC_BARRIER_H
#define ___ASM_SPARC_BARRIER_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/barrier_64.h>
#else
#include <asm/barrier_32.h>
#endif
#endif
15 changes: 15 additions & 0 deletions arch/sparc/include/asm/barrier_32.h
@@ -0,0 +1,15 @@
#ifndef __SPARC_BARRIER_H
#define __SPARC_BARRIER_H

/* XXX Change this if we ever use a PSO mode kernel. */
#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
#define smp_mb() __asm__ __volatile__("":::"memory")
#define smp_rmb() __asm__ __volatile__("":::"memory")
#define smp_wmb() __asm__ __volatile__("":::"memory")
#define smp_read_barrier_depends() do { } while(0)

#endif /* !(__SPARC_BARRIER_H) */
56 changes: 56 additions & 0 deletions arch/sparc/include/asm/barrier_64.h
@@ -0,0 +1,56 @@
#ifndef __SPARC64_BARRIER_H
#define __SPARC64_BARRIER_H

/* These are here in an effort to more fully work around Spitfire Errata
* #51. Essentially, if a memory barrier occurs soon after a mispredicted
* branch, the chip can stop executing instructions until a trap occurs.
* Therefore, if interrupts are disabled, the chip can hang forever.
*
* It used to be believed that the memory barrier had to be right in the
* delay slot, but a case has been traced recently wherein the memory barrier
* was one instruction after the branch delay slot and the chip still hung.
* The offending sequence was the following in sym_wakeup_done() of the
* sym53c8xx_2 driver:
*
* call sym_ccb_from_dsa, 0
* movge %icc, 0, %l0
* brz,pn %o0, .LL1303
* mov %o0, %l2
* membar #LoadLoad
*
* The branch has to be mispredicted for the bug to occur. Therefore, we put
* the memory barrier explicitly into a "branch always, predicted taken"
* delay slot to avoid the problem case.
*/
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
			     " membar " type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)

/* The kernel always executes in TSO memory model these days,
* and furthermore most sparc64 chips implement more stringent
* memory ordering than required by the specifications.
*/
#define mb() membar_safe("#StoreLoad")
#define rmb() __asm__ __volatile__("":::"memory")
#define wmb() __asm__ __volatile__("":::"memory")

#define read_barrier_depends() do { } while(0)
#define set_mb(__var, __value) \
do { __var = __value; membar_safe("#StoreLoad"); } while(0)

#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#else
#define smp_mb() __asm__ __volatile__("":::"memory")
#define smp_rmb() __asm__ __volatile__("":::"memory")
#define smp_wmb() __asm__ __volatile__("":::"memory")
#endif

#define smp_read_barrier_depends() do { } while(0)

#endif /* !(__SPARC64_BARRIER_H) */
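
For readers unfamiliar with these primitives, the sketch below shows the usual pairing the smp_*() macros are meant for: a writer orders its payload before a ready flag with smp_wmb(), and a reader orders the flag check before the payload load with smp_rmb(). This is illustrative only, not part of the commit, and the names (shared_data, shared_flag, producer, consumer) are invented; on sparc64's TSO model both barriers reduce to compiler barriers, exactly as the definitions above show.

/* Illustrative sketch only -- not from this commit. */
static int shared_data;
static int shared_flag;

static void producer(void)
{
	shared_data = 42;	/* write the payload...                  */
	smp_wmb();		/* ...and order it before the ready flag */
	shared_flag = 1;
}

static int consumer(void)
{
	if (shared_flag) {
		smp_rmb();	/* order the flag read before the payload read */
		return shared_data;
	}
	return -1;		/* not ready yet */
}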
3 changes: 3 additions & 0 deletions arch/sparc/include/asm/bug.h
@@ -19,4 +19,7 @@ extern void do_BUG(const char *file, int line);

#include <asm-generic/bug.h>

struct pt_regs;
extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

#endif
9 changes: 9 additions & 0 deletions arch/sparc/include/asm/cacheflush_32.h
@@ -83,4 +83,13 @@ extern void sparc_flush_page_to_ram(struct page *page);
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()

/* When a context switch happens we must flush all user windows so that
* the windows of the current process are flushed onto its stack. This
* way the windows are all clean for the next process and the stack
* frames are up to date.
*/
extern void flush_user_windows(void);
extern void kill_user_windows(void);
extern void flushw_all(void);

#endif /* _SPARC_CACHEFLUSH_H */
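
As a hedged illustration of why flush_user_windows() is exported here: before the kernel inspects a user task's stack-resident register windows (signal delivery, ptrace and the like), it must first spill the live windows out of the register file onto that stack. The helper below is hypothetical -- peek_user_window() is not a real kernel function -- and assumes struct reg_window32 from asm/ptrace.h.

/* Hypothetical example, not from this commit. */
#include <linux/errno.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>	/* flush_user_windows() */
#include <asm/ptrace.h>		/* struct reg_window32 */

static int peek_user_window(struct reg_window32 __user *uw,
			    struct reg_window32 *kw)
{
	flush_user_windows();	/* spill live windows to the user stack */
	if (copy_from_user(kw, uw, sizeof(*kw)))
		return -EFAULT;	/* the stack copy is only now current */
	return 0;
}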
10 changes: 10 additions & 0 deletions arch/sparc/include/asm/cacheflush_64.h
@@ -9,6 +9,16 @@

/* Cache flush operations. */


#define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
#define flushw_all() __asm__ __volatile__("flushw")

extern void __flushw_user(void);
#define flushw_user() __flushw_user()

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all

/* These are the same regardless of whether this is an SMP kernel or not. */
#define flush_cache_mm(__mm) \
do { if ((__mm) == current->mm) flushw_user(); } while(0)
8 changes: 8 additions & 0 deletions arch/sparc/include/asm/cmpxchg.h
@@ -0,0 +1,8 @@
#ifndef ___ASM_SPARC_CMPXCHG_H
#define ___ASM_SPARC_CMPXCHG_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/cmpxchg_64.h>
#else
#include <asm/cmpxchg_32.h>
#endif
#endif
112 changes: 112 additions & 0 deletions arch/sparc/include/asm/cmpxchg_32.h
@@ -0,0 +1,112 @@
/* 32-bit atomic xchg() and cmpxchg() definitions.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
* Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
*
* Additions by Keith M Wesolowski (wesolows@foobazco.org) based
* on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
*/

#ifndef __ARCH_SPARC_CMPXCHG__
#define __ARCH_SPARC_CMPXCHG__

#include <asm/btfixup.h>

/* This has special calling conventions */
#ifndef CONFIG_SMP
BTFIXUPDEF_CALL(void, ___xchg32, void)
#endif

static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
{
#ifdef CONFIG_SMP
	__asm__ __volatile__("swap [%2], %0"
			     : "=&r" (val)
			     : "0" (val), "r" (m)
			     : "memory");
	return val;
#else
	register unsigned long *ptr asm("g1");
	register unsigned long ret asm("g2");

	ptr = (unsigned long *) m;
	ret = val;

	/* Note: this is magic and the nop there is
	   really needed. */
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___f____xchg32\n\t"
	" nop\n\t"
	: "=&r" (ret)
	: "0" (ret), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return ret;
#endif
}

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

/* Emulate cmpxchg() the same way we emulate atomics,
* by hashing the object address and indexing into an array
* of spinlocks to get a bit of performance...
*
* See arch/sparc/lib/atomic32.c for implementation.
*
* Cribbed from <asm-parisc/atomic.h>
*/
#define __HAVE_ARCH_CMPXCHG 1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
/* we only need to support cmpxchg of a u32 on sparc */
extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);

/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
	default:
		__cmpxchg_called_with_bad_pointer();
		break;
	}
	return old;
}

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})

#include <asm-generic/cmpxchg-local.h>

/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif /* __ARCH_SPARC_CMPXCHG__ */
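
The comment above defers the real work to arch/sparc/lib/atomic32.c. As a rough, from-memory sketch (not part of this diff), the out-of-line __cmpxchg_u32() there hashes the target address into a small array of spinlocks and performs the compare-and-store under that lock with interrupts disabled:

/* Sketch of the hashed-spinlock fallback; see arch/sparc/lib/atomic32.c
 * for the real implementation. */
#include <linux/spinlock.h>
#include <linux/types.h>

#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	\
	(&__atomic_hash[(((unsigned long)a) >> 8) & (ATOMIC_HASH_SIZE - 1)])

static spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... ATOMIC_HASH_SIZE - 1] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new_)
{
	unsigned long flags;
	u32 prev;

	/* Every emulated atomic on an address that hashes to this bucket
	 * serialises on the same lock, so the load-compare-store below is
	 * atomic with respect to all of them. */
	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	if ((prev = *ptr) == old)
		*ptr = new_;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}

Callers then go through the cmpxchg() macro defined above in the usual retry-loop style: read the current value, compute the new one, and repeat until cmpxchg() reports that nothing changed underneath, much as __atomic_add_unless() in atomic_64.h does.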