These are needed to use ubi/ubifs.

Signed-off-by: Thomas Chou <thomas@wytron.com.tw>
Signed-off-by: Scott McNutt <smcnutt@psyent.com>
parent d8b73dffa9    commit 3bef253f08
@@ -0,0 +1,189 @@
#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>
#include <asm/system.h>

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	__raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
	__raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave(). So you can get NMI events occurring while a
 * *_bit function is holding a spin lock. If the NMI handler also wants
 * to do bit manipulation (and they do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 *
 * by Keith Owens
 */

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered. It may be
 * reordered on architectures other than x86.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It may be reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It may be reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */
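
For orientation, here is a minimal usage sketch (not part of the patch): the bitmap and helper names below are invented for illustration, and it assumes these headers are reachable through an <asm/bitops.h> wrapper in the usual asm-generic layout. On CONFIG_SMP builds each call briefly holds the spinlock picked by ATOMIC_HASH() for the word containing the bit; otherwise it just disables interrupts around the read-modify-write.

#include <asm/bitops.h>

static unsigned long pending[2];	/* 64 flag bits on a 32-bit target */

/* Atomically mark flag 'nr' as pending. */
static void mark_pending(int nr)
{
	set_bit(nr, pending);
}

/* Claim flag 'nr': only the caller that flips it from 0 to 1 gets nonzero. */
static int claim_pending(int nr)
{
	return !test_and_set_bit(nr, pending);
}

/* Drop flag 'nr' again. */
static void drop_pending(int nr)
{
	clear_bit(nr, pending);
}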
@@ -0,0 +1,41 @@
#ifndef _ASM_GENERIC_BITOPS_FFS_H_
#define _ASM_GENERIC_BITOPS_FFS_H_

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from ffz (man ffs).
 */
static inline int ffs(int x)
{
	int r = 1;

	if (!x)
		return 0;
	if (!(x & 0xffff)) {
		x >>= 16;
		r += 16;
	}
	if (!(x & 0xff)) {
		x >>= 8;
		r += 8;
	}
	if (!(x & 0xf)) {
		x >>= 4;
		r += 4;
	}
	if (!(x & 3)) {
		x >>= 2;
		r += 2;
	}
	if (!(x & 1)) {
		x >>= 1;
		r += 1;
	}
	return r;
}

#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */
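
As a quick sanity check of the convention used here, ffs() numbers bits from 1 and returns 0 for a zero argument, exactly like the libc routine, so ffs(1) == 1, ffs(0x8) == 4 and ffs(0x80000000) == 32. Below is a small, hypothetical helper showing the usual iterate-over-set-bits pattern built on it.

/* Visit each set bit of 'word', lowest first (helper name is illustrative). */
static void for_each_set_bit_in(unsigned int word, void (*visit)(int bit))
{
	while (word) {
		visit(ffs(word) - 1);	/* convert the 1-based result to a 0-based bit index */
		word &= word - 1;	/* clear the lowest set bit */
	}
}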
@@ -0,0 +1,108 @@
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_

#include <asm/types.h>

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p |= mask;
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p &= ~mask;
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p ^= mask;
}

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}

/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr,
					volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old ^ mask;
	return (old & mask) != 0;
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}

#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
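
Finally, a brief sketch contrasting these with the locked variants above (the array and function here are likewise made up): the double-underscore forms do a plain read-modify-write, so they are only safe where the caller already has exclusive access to the bitmap, e.g. single-threaded init code or a section protected by the caller's own lock.

static unsigned long block_map[4];	/* hypothetical bitmap: 128 bits */

static void init_block_map(void)
{
	int i;

	/* Single-threaded setup: the cheaper non-atomic forms are fine here. */
	for (i = 0; i < 128; i += 2)
		__set_bit(i, block_map);	/* mark the even blocks */

	if (test_bit(4, block_map))
		__change_bit(4, block_map);	/* toggle bit 4 back off */

	/* The return value is the bit's previous state. */
	if (__test_and_clear_bit(0, block_map))
		__set_bit(1, block_map);
}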