1 /***********************license start***************
2 * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
24 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
25 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
26 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
27 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
28 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
29 * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
30 * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
31 * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
32 * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
35 * For any questions regarding licensing please contact marketing@caviumnetworks.com
37 ***********************license end**************************************/
47 * Implementation of spinlocks.
49 * <hr>$Revision: 41586 $<hr>
53 #ifndef __CVMX_SPINLOCK_H__
54 #define __CVMX_SPINLOCK_H__
62 /* Spinlocks for Octeon */
65 // define these to enable recursive spinlock debugging
66 //#define CVMX_SPINLOCK_DEBUG
/**
 * Spinlocks for Octeon
 */
/* NOTE(review): the "typedef struct {" opener is elided in this view of the
 * file; restored here so the declaration parses — confirm against the full
 * header. */
typedef struct {
    volatile uint32_t value;  /* lock word: 0 == unlocked, 1 == locked */
} cvmx_spinlock_t;

// note - macros not expanded in inline ASM, so values hardcoded
#define CVMX_SPINLOCK_UNLOCKED_VAL 0
#define CVMX_SPINLOCK_LOCKED_VAL 1

/* Static initializer for an unlocked cvmx_spinlock_t. */
#define CVMX_SPINLOCK_UNLOCKED_INITIALIZER {CVMX_SPINLOCK_UNLOCKED_VAL}
85 * Initialize a spinlock
87 * @param lock Lock to initialize
89 static inline void cvmx_spinlock_init(cvmx_spinlock_t *lock)
91 lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
96 * Return non-zero if the spinlock is currently locked
98 * @param lock Lock to check
99 * @return Non-zero if locked
101 static inline int cvmx_spinlock_locked(cvmx_spinlock_t *lock)
103 return (lock->value != CVMX_SPINLOCK_UNLOCKED_VAL);
/**
 * Releases lock
 *
 * @param lock   pointer to lock structure
 */
static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock)
/* NOTE(review): the function body is elided in this view of the file; the
 * full source presumably clears lock->value with appropriate sync barriers
 * around the store — confirm against the complete header. */
121 * Attempts to take the lock, but does not spin if lock is not available.
122 * May take some time to acquire the lock even if it is available
123 * due to the ll/sc not succeeding.
125 * @param lock pointer to lock structure
127 * @return 0: lock successfully taken
128 * 1: lock not taken, held by someone else
129 * These return values match the Linux semantics.
132 static inline unsigned int cvmx_spinlock_trylock(cvmx_spinlock_t *lock)
136 __asm__ __volatile__(
138 "1: ll %[tmp], %[val] \n"
139 " bnez %[tmp], 2f \n" // if lock held, fail immediately
141 " sc %[tmp], %[val] \n"
142 " beqz %[tmp], 1b \n"
146 : [val] "+m" (lock->value), [tmp] "=&r" (tmp)
150 return (!!tmp); /* normalize to 0 or 1 */
154 * Gets lock, spins until lock is taken
156 * @param lock pointer to lock structure
158 static inline void cvmx_spinlock_lock(cvmx_spinlock_t *lock)
162 __asm__ __volatile__(
164 "1: ll %[tmp], %[val] \n"
165 " bnez %[tmp], 1b \n"
167 " sc %[tmp], %[val] \n"
168 " beqz %[tmp], 1b \n"
171 : [val] "+m" (lock->value), [tmp] "=&r" (tmp)
179 /** ********************************************************************
181 * These spinlocks use a single bit (bit 31) of a 32 bit word for locking.
182 * The rest of the bits in the word are left undisturbed. This enables more
183 * compact data structures as only 1 bit is consumed for the lock.
/**
 * Gets lock, spins until lock is taken
 * Preserves the low 31 bits of the 32 bit
 * word used for the lock.
 *
 * @param word   word to lock bit 31 of
 */
static inline void cvmx_spinlock_bit_lock(uint32_t *word)
{
    unsigned int tmp;
    unsigned int sav; /* listed as an asm output below; presumably scratch —
                         TODO confirm against the full source */

    /* NOTE(review): .set noreorder/noat, "li $at, 1", the nop delay slot and
     * the "memory" clobber were elided in this view; restored per the Cavium
     * SDK layout — without loading $at the ins would splice in an undefined
     * bit value. */
    __asm__ __volatile__(
        ".set noreorder               \n"
        ".set noat                    \n"
        "1: ll    %[tmp], %[val]      \n"
        "   bbit1 %[tmp], 31, 1b      \n" // spin while bit 31 is set
        "   li    $at, 1              \n" // (delay slot) bit value to insert
        "   ins   %[tmp], $at, 31, 1  \n" // set bit 31, preserve bits 30:0
        "   sc    %[tmp], %[val]      \n"
        "   beqz  %[tmp], 1b          \n" // sc failed (contention): retry
        "   nop                       \n" // (delay slot)
        ".set at                      \n"
        ".set reorder                 \n"
        : [val] "+m" (*word), [tmp] "=&r" (tmp), [sav] "=&r" (sav)
        :
        : "memory");
}
/**
 * Attempts to get lock, returns immediately with success/failure
 * Preserves the low 31 bits of the 32 bit
 * word used for the lock.
 *
 * @param word   word to lock bit 31 of
 * @return 0: lock successfully taken
 *         1: lock not taken, held by someone else
 * These return values match the Linux semantics.
 */
static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
{
    unsigned int tmp;

    /* NOTE(review): .set directives, "li $at, 1", the li delay-slot result
     * instruction, the 2: label and the "memory" clobber were elided in this
     * view; restored per the Cavium SDK layout. */
    __asm__ __volatile__(
        ".set noreorder               \n"
        ".set noat                    \n"
        "1: ll    %[tmp], %[val]      \n"
        "   bbit1 %[tmp], 31, 2f      \n" // if lock held, fail immediately
        "   li    $at, 1              \n" // (delay slot) bit value to insert
        "   ins   %[tmp], $at, 31, 1  \n" // set bit 31, preserve bits 30:0
        "   sc    %[tmp], %[val]      \n"
        "   beqz  %[tmp], 1b          \n" // sc failed (contention): retry
        "   li    %[tmp], 0           \n" // (delay slot) success result
        "2:                           \n"
        ".set at                      \n"
        ".set reorder                 \n"
        : [val] "+m" (*word), [tmp] "=&r" (tmp)
        :
        : "memory");

    return (!!tmp); /* normalize to 0 or 1 */
}
/**
 * Unconditionally clears bit 31 of the lock word.  Note that this is
 * done non-atomically, as this implementation assumes that the rest
 * of the bits in the word are protected by the lock.
 *
 * NOTE(review): the full source may issue a sync barrier before this store
 * (release ordering) — confirm against the complete header.
 *
 * @param word   word to unlock bit 31 in
 */
static inline void cvmx_spinlock_bit_unlock(uint32_t *word)
{
    const uint32_t lock_bit = (uint32_t)1 << 31;
    *word &= ~lock_bit;
}
/** ********************************************************************
 * Recursive spinlocks
 */
/* NOTE(review): the "typedef struct {" opener is elided in this view of the
 * file; restored here so the declaration parses — confirm against the full
 * header. */
typedef struct {
    volatile unsigned int value;     /* nesting count; 0 == unlocked (rec_unlock decrements) */
    volatile unsigned int core_num;  /* core ID of the holder (used as the lock 'key') */
} cvmx_spinlock_rec_t;
280 * Initialize a recursive spinlock
282 * @param lock Lock to initialize
284 static inline void cvmx_spinlock_rec_init(cvmx_spinlock_rec_t *lock)
286 lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
291 * Return non-zero if the recursive spinlock is currently locked
293 * @param lock Lock to check
294 * @return Non-zero if locked
296 static inline int cvmx_spinlock_rec_locked(cvmx_spinlock_rec_t *lock)
298 return (lock->value != CVMX_SPINLOCK_UNLOCKED_VAL);
303 * Unlocks one level of recursive spinlock. Lock is not unlocked
304 * unless this is the final unlock call for that spinlock
306 * @param lock ptr to recursive spinlock structure
308 static inline void cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock);
310 #ifdef CVMX_SPINLOCK_DEBUG
311 #define cvmx_spinlock_rec_unlock(x) _int_cvmx_spinlock_rec_unlock((x), __FILE__, __LINE__)
312 static inline void _int_cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock, char *filename, int linenum)
314 static inline void cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock)
318 unsigned int temp, result;
320 core_num = cvmx_get_core_num();
322 #ifdef CVMX_SPINLOCK_DEBUG
324 if (lock->core_num != core_num)
326 cvmx_dprintf("ERROR: Recursive spinlock release attemped by non-owner! file: %s, line: %d\n", filename, linenum);
332 __asm__ __volatile__(
334 " addi %[tmp], %[pid], 0x80 \n"
335 " sw %[tmp], %[lid] # set lid to invalid value\n"
337 "1: ll %[tmp], %[val] \n"
338 " addu %[res], %[tmp], -1 # decrement lock count\n"
339 " sc %[res], %[val] \n"
340 " beqz %[res], 1b \n"
342 " beq %[tmp], %[res], 2f # res is 1 on successful sc \n"
344 " sw %[pid], %[lid] # set lid to pid, only if lock still held\n"
348 : [res] "=&r" (result), [tmp] "=&r" (temp), [val] "+m" (lock->value), [lid] "+m" (lock->core_num)
349 : [pid] "r" (core_num)
353 #ifdef CVMX_SPINLOCK_DEBUG
355 if (lock->value == ~0UL)
357 cvmx_dprintf("ERROR: Recursive spinlock released too many times! file: %s, line: %d\n", filename, linenum);
366 * Takes recursive spinlock for a given core. A core can take the lock multiple
367 * times, and the lock is released only when the corresponding number of
368 * unlocks have taken place.
370 * NOTE: This assumes only one thread per core, and that the core ID is used as
371 * the lock 'key'. (This implementation cannot be generalized to allow
372 * multiple threads to use the same key (core id) .)
374 * @param lock address of recursive spinlock structure. Note that this is
375 * distinct from the standard spinlock
377 static inline void cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock);
379 #ifdef CVMX_SPINLOCK_DEBUG
380 #define cvmx_spinlock_rec_lock(x) _int_cvmx_spinlock_rec_lock((x), __FILE__, __LINE__)
381 static inline void _int_cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock, char *filename, int linenum)
383 static inline void cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock)
388 volatile unsigned int tmp;
389 volatile int core_num;
391 core_num = cvmx_get_core_num();
394 __asm__ __volatile__(
396 "1: ll %[tmp], %[val] # load the count\n"
397 " bnez %[tmp], 2f # if count!=zero branch to 2\n"
398 " addu %[tmp], %[tmp], 1 \n"
399 " sc %[tmp], %[val] \n"
400 " beqz %[tmp], 1b # go back if not success\n"
402 " j 3f # go to write core_num \n"
403 "2: lw %[tmp], %[lid] # load the core_num \n"
404 " bne %[tmp], %[pid], 1b # core_num no match, restart\n"
406 " lw %[tmp], %[val] \n"
407 " addu %[tmp], %[tmp], 1 \n"
408 " sw %[tmp], %[val] # update the count\n"
409 "3: sw %[pid], %[lid] # store the core_num\n"
412 : [tmp] "=&r" (tmp), [val] "+m" (lock->value), [lid] "+m" (lock->core_num)
413 : [pid] "r" (core_num)
416 #ifdef CVMX_SPINLOCK_DEBUG
417 if (lock->core_num != core_num)
419 cvmx_dprintf("cvmx_spinlock_rec_lock: lock taken, but core_num is incorrect. file: %s, line: %d\n", filename, linenum);
430 #endif /* __CVMX_SPINLOCK_H__ */