1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Inc. nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
49 * Implementation of spinlocks.
51 * <hr>$Revision: 70030 $<hr>
55 #ifndef __CVMX_SPINLOCK_H__
56 #define __CVMX_SPINLOCK_H__
64 /* Spinlocks for Octeon */
67 // define these to enable recursive spinlock debugging
68 //#define CVMX_SPINLOCK_DEBUG
72 * Spinlocks for Octeon
volatile uint32_t value;  /**< lock word: CVMX_SPINLOCK_UNLOCKED_VAL (0) when free, non-zero while held */
78 // note - macros not expanded in inline ASM, so values hardcoded
79 #define CVMX_SPINLOCK_UNLOCKED_VAL 0
80 #define CVMX_SPINLOCK_LOCKED_VAL 1
83 #define CVMX_SPINLOCK_UNLOCKED_INITIALIZER {CVMX_SPINLOCK_UNLOCKED_VAL}
87 * Initialize a spinlock
89 * @param lock Lock to initialize
91 static inline void cvmx_spinlock_init(cvmx_spinlock_t *lock)
93 lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
98 * Return non-zero if the spinlock is currently locked
100 * @param lock Lock to check
101 * @return Non-zero if locked
103 static inline int cvmx_spinlock_locked(cvmx_spinlock_t *lock)
105 return (lock->value != CVMX_SPINLOCK_UNLOCKED_VAL);
 * @param lock pointer to lock structure
 */
/* NOTE(review): the body of this function is elided from this chunk; only
 * the signature is visible. Presumably it issues a write-sync barrier and
 * stores CVMX_SPINLOCK_UNLOCKED_VAL to release the lock -- confirm against
 * the complete file. */
static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock)
/**
 * Attempts to take the lock, but does not spin if lock is not available.
 * May take some time to acquire the lock even if it is available
 * due to the ll/sc not succeeding.
 *
 * @param lock pointer to lock structure
 *
 * @return 0: lock successfully taken
 *         1: lock not taken, held by someone else
 * These return values match the Linux semantics.
 */
static inline unsigned int cvmx_spinlock_trylock(cvmx_spinlock_t *lock)
    /* MIPS load-linked (ll) / store-conditional (sc): the sc only succeeds
     * if no other core wrote the lock word since the ll, so a successful sc
     * means we atomically transitioned the word from free to held.
     * NOTE(review): the opening brace, the 'tmp' local declaration, the
     * branch-delay-slot instructions, the "2:" label, and the asm
     * input/clobber lists are elided from this chunk -- verify against the
     * complete file. */
    __asm__ __volatile__(
    "1: ll %[tmp], %[val] \n"   /* read lock word, open the ll/sc window */
    " bnez %[tmp], 2f \n" // if lock held, fail immediately
    " sc %[tmp], %[val] \n"     /* try the atomic store; tmp=1 iff it stuck */
    " beqz %[tmp], 1b \n"       /* sc failed spuriously: retry, lock may still be free */
    : [val] "+m" (lock->value), [tmp] "=&r" (tmp)
    /* presumably the elided delay slots leave tmp==0 on success and
     * non-zero on failure -- confirm against the full source */
    return (!!tmp); /* normalize to 0 or 1 */
/**
 * Gets lock, spins until lock is taken
 *
 * @param lock pointer to lock structure
 */
static inline void cvmx_spinlock_lock(cvmx_spinlock_t *lock)
    /* Classic MIPS ll/sc spin loop: spin while the word is non-zero, then
     * try to claim it; if the store-conditional loses the race, start over.
     * NOTE(review): the opening brace, the 'tmp' local, delay-slot
     * instructions, and the tail of the asm statement (inputs, clobbers,
     * closing paren/brace) are elided from this chunk. */
    __asm__ __volatile__(
    "1: ll %[tmp], %[val] \n"   /* read lock word, open the ll/sc window */
    " bnez %[tmp], 1b \n"       /* still held by someone: keep spinning */
    " sc %[tmp], %[val] \n"     /* try to store the locked value */
    " beqz %[tmp], 1b \n"       /* sc failed: another core intervened, retry */
    : [val] "+m" (lock->value), [tmp] "=&r" (tmp)
181 /** ********************************************************************
183 * These spinlocks use a single bit (bit 31) of a 32 bit word for locking.
184 * The rest of the bits in the word are left undisturbed. This enables more
185 * compact data structures as only 1 bit is consumed for the lock.
/**
 * Gets lock, spins until lock is taken
 * Preserves the low 31 bits of the 32 bit
 * word used for the lock.
 *
 * @param word word to lock bit 31 of
 */
static inline void cvmx_spinlock_bit_lock(uint32_t *word)
    /* Same ll/sc discipline as cvmx_spinlock_lock, but only bit 31 of the
     * word is the lock; the low 31 bits are carried through unchanged.
     * bbit1 (branch if bit set) and ins (insert bit field) are
     * Octeon/cnMIPS instructions.
     * NOTE(review): the elided lines presumably load the value inserted via
     * $at and declare the [sav] temporary referenced in the constraints --
     * confirm against the complete file. */
    __asm__ __volatile__(
    "1: ll %[tmp], %[val] \n"       /* load the whole 32-bit word */
    " bbit1 %[tmp], 31, 1b \n"      /* bit 31 set: lock held, keep spinning */
    " ins %[tmp], $at, 31, 1 \n"    /* set bit 31, preserve low 31 bits */
    " sc %[tmp], %[val] \n"         /* attempt atomic write-back */
    " beqz %[tmp], 1b \n"           /* sc failed: retry from the ll */
    : [val] "+m" (*word), [tmp] "=&r" (tmp), [sav] "=&r" (sav)
/**
 * Attempts to get lock, returns immediately with success/failure
 * Preserves the low 31 bits of the 32 bit
 * word used for the lock.
 *
 * @param word word to lock bit 31 of
 * @return 0: lock successfully taken
 *         1: lock not taken, held by someone else
 * These return values match the Linux semantics.
 */
static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
    /* Single-shot variant of cvmx_spinlock_bit_lock: bail out to "2:" as
     * soon as bit 31 is observed set instead of spinning.
     * NOTE(review): locals, delay-slot instructions, the "2:" label, and
     * the asm input/clobber lists are elided from this chunk. */
    __asm__ __volatile__(
    "1: ll %[tmp], %[val] \n"       /* load lock word */
    " bbit1 %[tmp], 31, 2f \n" // if lock held, fail immediately
    " ins %[tmp], $at, 31, 1 \n"    /* set the lock bit, preserve the rest */
    " sc %[tmp], %[val] \n"         /* attempt atomic update */
    " beqz %[tmp], 1b \n"           /* spurious sc failure: retry */
    : [val] "+m" (*word), [tmp] "=&r" (tmp)
    /* presumably tmp==0 on success and non-zero on failure (set in the
     * elided delay slots) -- confirm against the full source */
    return (!!tmp); /* normalize to 0 or 1 */
/**
 * Unconditionally clears bit 31 of the lock word. Note that this is
 * done non-atomically, as this implementation assumes that the rest
 * of the bits in the word are protected by the lock.
 *
 * @param word word to unlock bit 31 in
 */
static inline void cvmx_spinlock_bit_unlock(uint32_t *word)
    /* NOTE(review): the lines between the signature and this store are
     * elided from this chunk; the original presumably issues a write-sync
     * barrier (CVMX_SYNCWS) before releasing -- confirm against the full
     * file before relying on the ordering here. */
    *word &= ~(1UL << 31) ;   /* clear only the lock bit; low 31 bits untouched */
/** ********************************************************************
 * Recursive spinlocks
 */
volatile unsigned int value;     /* nesting count: 0 when free, incremented per recursive lock */
volatile unsigned int core_num;  /* core id of the current owner; set to an out-of-range value on release */
} cvmx_spinlock_rec_t;
282 * Initialize a recursive spinlock
284 * @param lock Lock to initialize
286 static inline void cvmx_spinlock_rec_init(cvmx_spinlock_rec_t *lock)
288 lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
293 * Return non-zero if the recursive spinlock is currently locked
295 * @param lock Lock to check
296 * @return Non-zero if locked
298 static inline int cvmx_spinlock_rec_locked(cvmx_spinlock_rec_t *lock)
300 return (lock->value != CVMX_SPINLOCK_UNLOCKED_VAL);
/**
 * Unlocks one level of recursive spinlock. Lock is not unlocked
 * unless this is the final unlock call for that spinlock
 *
 * @param lock ptr to recursive spinlock structure
 */
static inline void cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock);
#ifdef CVMX_SPINLOCK_DEBUG
/* Debug build: route calls through a wrapper so the caller's file/line can
 * be reported on misuse. (The matching #else/#endif lines are elided from
 * this chunk.) */
#define cvmx_spinlock_rec_unlock(x) _int_cvmx_spinlock_rec_unlock((x), __FILE__, __LINE__)
static inline void _int_cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock, char *filename, int linenum)
static inline void cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock)
unsigned int temp, result;
core_num = cvmx_get_core_num();
#ifdef CVMX_SPINLOCK_DEBUG
/* Only the owning core may release one level; report the call site. */
if (lock->core_num != core_num)
cvmx_dprintf("ERROR: Recursive spinlock release attemped by non-owner! file: %s, line: %d\n", filename, linenum);
/* Atomically decrement the nesting count with ll/sc. The owner id (lid) is
 * first set to an invalid value; it is restored to this core's id only if
 * the lock is still held after the decrement. */
__asm__ __volatile__(
" addi %[tmp], %[pid], 0x80 \n"
" sw %[tmp], %[lid] # set lid to invalid value\n"
"1: ll %[tmp], %[val] \n"
" addu %[res], %[tmp], -1 # decrement lock count\n"
" sc %[res], %[val] \n"
" beqz %[res], 1b \n"
/* here tmp still holds the pre-decrement count; if it equals res (==1 after
 * a successful sc) this was the final unlock, so skip re-publishing lid */
" beq %[tmp], %[res], 2f # res is 1 on successful sc \n"
" sw %[pid], %[lid] # set lid to pid, only if lock still held\n"
: [res] "=&r" (result), [tmp] "=&r" (temp), [val] "+m" (lock->value), [lid] "+m" (lock->core_num)
: [pid] "r" (core_num)
#ifdef CVMX_SPINLOCK_DEBUG
/* NOTE(review): lock->value is a 32-bit unsigned int but ~0UL is unsigned
 * long; on LP64 targets the zero-extended value can never equal
 * 0xFFFFFFFFFFFFFFFF, so this over-release (underflow) check can never
 * fire. It likely should compare against ~0U -- confirm intended ABI. */
if (lock->value == ~0UL)
cvmx_dprintf("ERROR: Recursive spinlock released too many times! file: %s, line: %d\n", filename, linenum);
/**
 * Takes recursive spinlock for a given core. A core can take the lock multiple
 * times, and the lock is released only when the corresponding number of
 * unlocks have taken place.
 *
 * NOTE: This assumes only one thread per core, and that the core ID is used as
 * the lock 'key'. (This implementation cannot be generalized to allow
 * multiple threads to use the same key (core id) .)
 *
 * @param lock address of recursive spinlock structure. Note that this is
 * distinct from the standard spinlock
 */
static inline void cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock);
#ifdef CVMX_SPINLOCK_DEBUG
/* Debug build: wrapper records the caller's file/line for diagnostics.
 * (The matching #else/#endif lines are elided from this chunk.) */
#define cvmx_spinlock_rec_lock(x) _int_cvmx_spinlock_rec_lock((x), __FILE__, __LINE__)
static inline void _int_cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock, char *filename, int linenum)
static inline void cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock)
volatile unsigned int tmp;
volatile int core_num;
core_num = cvmx_get_core_num();
/* Fast path (count==0): claim the lock atomically with ll/sc, then jump to
 * label 3 to publish our core id. Recursive path (count!=0): if this core
 * already owns the lock, bump the count with plain lw/addu/sw. */
__asm__ __volatile__(
"1: ll %[tmp], %[val] # load the count\n"
" bnez %[tmp], 2f # if count!=zero branch to 2\n"
" addu %[tmp], %[tmp], 1 \n"
" sc %[tmp], %[val] \n"
" beqz %[tmp], 1b # go back if not success\n"
" j 3f # go to write core_num \n"
"2: lw %[tmp], %[lid] # load the core_num \n"
" bne %[tmp], %[pid], 1b # core_num no match, restart\n"
/* owner re-entry: the non-atomic increment is safe because no other core
 * can complete the ll/sc above while value is non-zero */
" lw %[tmp], %[val] \n"
" addu %[tmp], %[tmp], 1 \n"
" sw %[tmp], %[val] # update the count\n"
"3: sw %[pid], %[lid] # store the core_num\n"
: [tmp] "=&r" (tmp), [val] "+m" (lock->value), [lid] "+m" (lock->core_num)
: [pid] "r" (core_num)
#ifdef CVMX_SPINLOCK_DEBUG
/* Sanity check: after a successful lock this core must be the recorded owner. */
if (lock->core_num != core_num)
cvmx_dprintf("cvmx_spinlock_rec_lock: lock taken, but core_num is incorrect. file: %s, line: %d\n", filename, linenum);
432 #endif /* __CVMX_SPINLOCK_H__ */