/****************************************************************************
 * include/nuttx/spinlock.h
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership. The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

#ifndef __INCLUDE_NUTTX_SPINLOCK_H
#define __INCLUDE_NUTTX_SPINLOCK_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <sys/types.h>
#include <assert.h>
#include <stdint.h>

#include <nuttx/compiler.h>
#include <nuttx/irq.h>
#include <nuttx/arch.h>

#if defined(CONFIG_TICKET_SPINLOCK) || defined(CONFIG_RW_SPINLOCK)
#  include <nuttx/atomic.h>
#endif

#include <nuttx/spinlock_type.h>

#undef EXTERN
#if defined(__cplusplus)
#define EXTERN extern "C"
extern "C"
{
#else
#define EXTERN extern
#endif

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

/* Memory barriers may be provided in arch/spinlock.h
 *
 *   DMB - Data memory barrier. Assures writes are completed to memory.
 *   DSB - Data synchronization barrier.
 */

#undef __SP_UNLOCK_FUNCTION
#if !defined(SP_DMB)
#  define SP_DMB()
#else
#  define __SP_UNLOCK_FUNCTION 1
#endif

#if !defined(SP_DSB)
#  define SP_DSB()
#endif

#if !defined(SP_WFE)
#  define SP_WFE()
#endif

#if !defined(SP_SEV)
#  define SP_SEV()
#endif

#if !defined(__SP_UNLOCK_FUNCTION) && (defined(CONFIG_TICKET_SPINLOCK) || \
    defined(CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS))
#  define __SP_UNLOCK_FUNCTION 1
#endif

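/* For reference, an architecture's arch/spinlock.h might provide real
 * barriers along these lines (illustrative sketch for an ARM-class CPU,
 * not part of this header):
 *
 *   #define SP_DMB() __asm__ __volatile__ ("dmb ish" : : : "memory")
 *   #define SP_DSB() __asm__ __volatile__ ("dsb ish" : : : "memory")
 *   #define SP_WFE() __asm__ __volatile__ ("wfe" : : : "memory")
 *   #define SP_SEV() __asm__ __volatile__ ("sev")
 */
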
/****************************************************************************
 * Public Function Prototypes
 ****************************************************************************/

#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
void sched_note_spinlock_lock(FAR volatile spinlock_t *spinlock);
void sched_note_spinlock_locked(FAR volatile spinlock_t *spinlock);
void sched_note_spinlock_abort(FAR volatile spinlock_t *spinlock);
void sched_note_spinlock_unlock(FAR volatile spinlock_t *spinlock);
#else
#  define sched_note_spinlock_lock(spinlock)
#  define sched_note_spinlock_locked(spinlock)
#  define sched_note_spinlock_abort(spinlock)
#  define sched_note_spinlock_unlock(spinlock)
#endif

/****************************************************************************
 * Public Data Types
 ****************************************************************************/

/****************************************************************************
 * Name: up_testset
 *
 * Description:
 *   Perform an atomic test and set operation on the provided spinlock.
 *
 *   This function must be provided via the architecture-specific logic.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object.
 *
 * Returned Value:
 *   The spinlock is always locked upon return. The previous value of the
 *   spinlock variable is returned, either SP_LOCKED if the spinlock was
 *   previously locked (meaning that the test-and-set operation failed to
 *   obtain the lock) or SP_UNLOCKED if the spinlock was previously unlocked
 *   (meaning that we successfully obtained the lock).
 *
 ****************************************************************************/

#if defined(CONFIG_ARCH_HAVE_TESTSET)
spinlock_t up_testset(FAR volatile spinlock_t *lock);
#else
static inline spinlock_t up_testset(FAR volatile spinlock_t *lock)
{
  irqstate_t flags;
  spinlock_t ret;

  flags = up_irq_save();

  ret = *lock;

  if (ret == SP_UNLOCKED)
    {
      *lock = SP_LOCKED;
    }

  up_irq_restore(flags);

  return ret;
}
#endif

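/* Usage sketch (illustrative only; g_dev_lock is a hypothetical lock):
 *
 *   static spinlock_t g_dev_lock = SP_UNLOCKED;
 *
 *   if (up_testset(&g_dev_lock) == SP_UNLOCKED)
 *     {
 *       ... the lock was free and this CPU now holds it ...
 *     }
 *   else
 *     {
 *       ... some other CPU already held the lock ...
 *     }
 */
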
/****************************************************************************
 * Name: spin_lock_init
 *
 * Description:
 *   Initialize a non-reentrant spinlock object to its initial,
 *   unlocked state.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to be initialized.
 *
 * Returned Value:
 *   None.
 *
 ****************************************************************************/

/* void spin_lock_init(FAR spinlock_t *lock); */
#define spin_lock_init(l) do { *(l) = SP_UNLOCKED; } while (0)

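/* Example (illustrative only; g_dev_lock is a hypothetical lock):
 *
 *   static spinlock_t g_dev_lock = SP_UNLOCKED;
 *
 *   spin_lock_init(&g_dev_lock);   Equivalent to the static initializer
 */
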
/****************************************************************************
 * Name: spin_lock_wo_note
 *
 * Description:
 *   If this CPU does not already hold the spinlock, then loop until the
 *   spinlock is successfully locked.
 *
 *   This implementation is the same as spin_lock() below except that
 *   it does not perform instrumentation logic.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   None. When the function returns, the spinlock was successfully locked
 *   by this CPU.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

#ifdef CONFIG_SPINLOCK
static inline_function void spin_lock_wo_note(FAR volatile spinlock_t *lock)
{
#ifdef CONFIG_TICKET_SPINLOCK
  int ticket = atomic_fetch_add(&lock->next, 1);
  while (atomic_read(&lock->owner) != ticket)
#else /* CONFIG_TICKET_SPINLOCK */
  while (up_testset(lock) == SP_LOCKED)
#endif
    {
      SP_DSB();
      SP_WFE();
    }

  SP_DMB();
}
#endif /* CONFIG_SPINLOCK */

/****************************************************************************
 * Name: spin_lock
 *
 * Description:
 *   If this CPU does not already hold the spinlock, then loop until the
 *   spinlock is successfully locked.
 *
 *   This implementation is non-reentrant and is prone to deadlocks in
 *   the case that any logic on the same CPU attempts to take the lock
 *   more than once.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   None. When the function returns, the spinlock was successfully locked
 *   by this CPU.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

#ifdef CONFIG_SPINLOCK
static inline_function void spin_lock(FAR volatile spinlock_t *lock)
{
  /* Notify that we are waiting for a spinlock */

  sched_note_spinlock_lock(lock);

  /* Lock without trace note */

  spin_lock_wo_note(lock);

  /* Notify that we have the spinlock */

  sched_note_spinlock_locked(lock);
}
#endif /* CONFIG_SPINLOCK */

/****************************************************************************
 * Name: spin_trylock_wo_note
 *
 * Description:
 *   Try once to lock the spinlock. Do not wait if the spinlock is already
 *   locked.
 *
 *   This implementation is the same as spin_trylock() below except that
 *   it does not perform instrumentation logic.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   false - Failure, the spinlock was already locked
 *   true  - Success, the spinlock was successfully locked
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

#ifdef CONFIG_SPINLOCK
static inline_function bool
spin_trylock_wo_note(FAR volatile spinlock_t *lock)
{
#ifdef CONFIG_TICKET_SPINLOCK
  if (!atomic_cmpxchg(&lock->next, &lock->owner,
                      atomic_read(&lock->next) + 1))
#else /* CONFIG_TICKET_SPINLOCK */
  if (up_testset(lock) == SP_LOCKED)
#endif /* CONFIG_TICKET_SPINLOCK */
    {
      SP_DSB();
      return false;
    }

  SP_DMB();
  return true;
}
#endif /* CONFIG_SPINLOCK */

/****************************************************************************
 * Name: spin_trylock
 *
 * Description:
 *   Try once to lock the spinlock. Do not wait if the spinlock is already
 *   locked.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   false - Failure, the spinlock was already locked
 *   true  - Success, the spinlock was successfully locked
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

#ifdef CONFIG_SPINLOCK
static inline_function bool spin_trylock(FAR volatile spinlock_t *lock)
{
  bool locked;

  /* Notify that we are waiting for a spinlock */

  sched_note_spinlock_lock(lock);

  /* Try lock without trace note */

  locked = spin_trylock_wo_note(lock);
  if (locked)
    {
      /* Notify that we have the spinlock */

      sched_note_spinlock_locked(lock);
    }
  else
    {
      /* Notify that we aborted the wait for the spinlock */

      sched_note_spinlock_abort(lock);
    }

  return locked;
}
#endif /* CONFIG_SPINLOCK */

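/* Usage sketch (illustrative only; g_dev_lock is a hypothetical lock):
 *
 *   if (spin_trylock(&g_dev_lock))
 *     {
 *       ... critical section ...
 *       spin_unlock(&g_dev_lock);
 *     }
 *   else
 *     {
 *       ... lock busy; back off or do something else ...
 *     }
 */
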
/****************************************************************************
 * Name: spin_unlock_wo_note
 *
 * Description:
 *   Release one count on a non-reentrant spinlock.
 *
 *   This implementation is the same as spin_unlock() below except that
 *   it does not perform instrumentation logic.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to unlock.
 *
 * Returned Value:
 *   None.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

#ifdef CONFIG_SPINLOCK
static inline_function void
spin_unlock_wo_note(FAR volatile spinlock_t *lock)
{
  SP_DMB();
#ifdef CONFIG_TICKET_SPINLOCK
  atomic_fetch_add(&lock->owner, 1);
#else
  *lock = SP_UNLOCKED;
#endif
  SP_DSB();
  SP_SEV();
}
#endif /* CONFIG_SPINLOCK */

/****************************************************************************
 * Name: spin_unlock
 *
 * Description:
 *   Release one count on a non-reentrant spinlock.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to unlock.
 *
 * Returned Value:
 *   None.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

#ifdef CONFIG_SPINLOCK
#  ifdef __SP_UNLOCK_FUNCTION
static inline_function void spin_unlock(FAR volatile spinlock_t *lock)
{
  /* Unlock without trace note */

  spin_unlock_wo_note(lock);

  /* Notify that we are unlocking the spinlock */

  sched_note_spinlock_unlock(lock);
}
#  else
#    define spin_unlock(l) do { *(l) = SP_UNLOCKED; } while (0)
#  endif
#endif /* CONFIG_SPINLOCK */

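/* Typical pairing (illustrative only; g_dev_lock and g_dev_count are
 * hypothetical):
 *
 *   spin_lock(&g_dev_lock);
 *   g_dev_count++;              Shared state protected by the lock
 *   spin_unlock(&g_dev_lock);
 *
 * Note that spin_lock() does not disable interrupts; use
 * spin_lock_irqsave() below when the protected data is also touched from
 * interrupt handlers.
 */
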
/****************************************************************************
 * Name: spin_is_locked
 *
 * Description:
 *   Test whether the spinlock is currently locked.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to test.
 *
 * Returned Value:
 *   A boolean value: true if the spinlock is locked; false if it is
 *   unlocked.
 *
 ****************************************************************************/

/* bool spin_is_locked(FAR spinlock_t lock); */
#ifdef CONFIG_TICKET_SPINLOCK
#  define spin_is_locked(l) \
   (atomic_read(&(*l).owner) != atomic_read(&(*l).next))
#else
#  define spin_is_locked(l) (*(l) == SP_LOCKED)
#endif

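/* Example (illustrative only; g_dev_lock is the hypothetical lock from the
 * sketches above):
 *
 *   DEBUGASSERT(spin_is_locked(&g_dev_lock));
 *
 * This kind of check is useful to assert that a helper function is only
 * called while its caller already holds the lock.
 */
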
/****************************************************************************
 * Name: spin_lock_irqsave_wo_note
 *
 * Description:
 *   This function is the no-trace version of spin_lock_irqsave().
 *
 ****************************************************************************/

#ifdef CONFIG_SPINLOCK
static inline_function
irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
{
  irqstate_t flags;
  flags = up_irq_save();

  spin_lock_wo_note(lock);

  return flags;
}
#else
#  define spin_lock_irqsave_wo_note(l) ((void)(l), up_irq_save())
#endif

/****************************************************************************
 * Name: spin_lock_irqsave
 *
 * Description:
 *   If SMP is enabled:
 *     Disable local interrupts, take the spinlock, and return the
 *     interrupt state.
 *
 *     NOTE: This API is a simple way to protect data (e.g. a H/W register
 *     or an internal data structure) in SMP mode. But do not use this API
 *     with kernel APIs which suspend the calling thread (e.g. nxsem_wait).
 *
 *   If SMP is not enabled:
 *     This function is equivalent to up_irq_save().
 *
 * Input Parameters:
 *   lock - Caller-specific spinlock. Must not be NULL.
 *
 * Returned Value:
 *   An opaque, architecture-specific value that represents the state of
 *   the interrupts prior to the call to spin_lock_irqsave(lock);
 *
 ****************************************************************************/

#ifdef CONFIG_SPINLOCK
static inline_function
irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
{
  irqstate_t flags;

  /* Notify that we are waiting for a spinlock */

  sched_note_spinlock_lock(lock);

  /* Lock without trace note */

  flags = spin_lock_irqsave_wo_note(lock);

  /* Notify that we have the spinlock */

  sched_note_spinlock_locked(lock);

  return flags;
}
#else
#  define spin_lock_irqsave(l) ((void)(l), up_irq_save())
#endif

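/* Typical pairing (illustrative only; g_dev_lock is hypothetical):
 *
 *   irqstate_t flags;
 *
 *   flags = spin_lock_irqsave(&g_dev_lock);
 *   ... access data shared with interrupt handlers ...
 *   spin_unlock_irqrestore(&g_dev_lock, flags);
 */
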
/****************************************************************************
 * Name: spin_trylock_irqsave_wo_note
 *
 * Description:
 *   Try once to lock the spinlock. Do not wait if the spinlock is already
 *   locked.
 *
 *   This implementation is the same as spin_trylock_irqsave() except that
 *   it does not perform instrumentation logic.
 *
 * Input Parameters:
 *   lock  - A reference to the spinlock object to lock.
 *   flags - Variable that receives the saved interrupt state.
 *
 * Returned Value:
 *   false - Failure, the spinlock was already locked
 *   true  - Success, the spinlock was successfully locked
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

#ifdef CONFIG_SPINLOCK
#  define spin_trylock_irqsave_wo_note(l, f) \
({ \
  f = up_irq_save(); \
  spin_trylock_wo_note(l) ? \
  true : ({ up_irq_restore(f); false; }); \
})
#else
#  define spin_trylock_irqsave_wo_note(l, f) \
({ \
  (void)(l); \
  f = up_irq_save(); \
  true; \
})
#endif /* CONFIG_SPINLOCK */

/****************************************************************************
 * Name: spin_trylock_irqsave
 *
 * Description:
 *   Try once to lock the spinlock. Do not wait if the spinlock is already
 *   locked.
 *
 * Input Parameters:
 *   lock  - A reference to the spinlock object to lock.
 *   flags - Variable that receives the saved interrupt state.
 *
 * Returned Value:
 *   false - Failure, the spinlock was already locked
 *   true  - Success, the spinlock was successfully locked
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

#ifdef CONFIG_SPINLOCK
#  define spin_trylock_irqsave(l, f) \
({ \
  f = up_irq_save(); \
  spin_trylock(l) ? \
  true : ({ up_irq_restore(f); false; }); \
})
#else
#  define spin_trylock_irqsave(l, f) \
({ \
  (void)(l); \
  f = up_irq_save(); \
  true; \
})
#endif /* CONFIG_SPINLOCK */

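/* Usage sketch (illustrative only; g_dev_lock is hypothetical):
 *
 *   irqstate_t flags;
 *
 *   if (spin_trylock_irqsave(&g_dev_lock, flags))
 *     {
 *       ... critical section with interrupts disabled ...
 *       spin_unlock_irqrestore(&g_dev_lock, flags);
 *     }
 *
 * On failure the macro restores the interrupt state itself, so flags is
 * only passed to spin_unlock_irqrestore() on the success path.
 */
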
/****************************************************************************
 * Name: spin_unlock_irqrestore_wo_note
 *
 * Description:
 *   This function is the no-trace version of spin_unlock_irqrestore().
 *
 ****************************************************************************/

#ifdef CONFIG_SPINLOCK
static inline_function
void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock,
                                    irqstate_t flags)
{
  spin_unlock_wo_note(lock);

  up_irq_restore(flags);
}
#else
#  define spin_unlock_irqrestore_wo_note(l, f) ((void)(l), up_irq_restore(f))
#endif

/****************************************************************************
 * Name: spin_unlock_irqrestore
 *
 * Description:
 *   If SMP is enabled:
 *     Release the lock and restore the interrupt state as it was prior
 *     to the previous call to spin_lock_irqsave(lock).
 *
 *   If SMP is not enabled:
 *     This function is equivalent to up_irq_restore().
 *
 * Input Parameters:
 *   lock - Caller-specific spinlock. Must not be NULL.
 *
 *   flags - The architecture-specific value that represents the state of
 *           the interrupts prior to the call to spin_lock_irqsave(lock);
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

#ifdef CONFIG_SPINLOCK
static inline_function
void spin_unlock_irqrestore(FAR volatile spinlock_t *lock,
                            irqstate_t flags)
{
  /* Unlock without trace note */

  spin_unlock_irqrestore_wo_note(lock, flags);

  /* Notify that we are unlocking the spinlock */

  sched_note_spinlock_unlock(lock);
}
#else
#  define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
#endif

#if defined(CONFIG_RW_SPINLOCK)

/****************************************************************************
 * Name: rwlock_init
 *
 * Description:
 *   Initialize a non-reentrant read/write spinlock object to its initial,
 *   unlocked state.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to be initialized.
 *
 * Returned Value:
 *   None.
 *
 ****************************************************************************/

#define rwlock_init(l) do { *(l) = RW_SP_UNLOCKED; } while (0)

/****************************************************************************
 * Name: read_lock
 *
 * Description:
 *   If this task does not already hold the spinlock, then loop until the
 *   spinlock is successfully locked.
 *
 *   This implementation is non-reentrant and takes one reader count on
 *   the lock.
 *
 *   Readers have priority over writers: while any reader holds the lock,
 *   new readers may still acquire it, but a writer cannot.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   None. When the function returns, the spinlock was successfully locked
 *   by this CPU.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

static inline_function void read_lock(FAR volatile rwlock_t *lock)
{
  while (true)
    {
      int old = atomic_read(lock);
      if (old <= RW_SP_WRITE_LOCKED)
        {
          DEBUGASSERT(old == RW_SP_WRITE_LOCKED);
          SP_DSB();
          SP_WFE();
        }
      else if (atomic_cmpxchg(lock, &old, old + 1))
        {
          break;
        }
    }

  SP_DMB();
}

/****************************************************************************
 * Name: read_trylock
 *
 * Description:
 *   If this task does not already hold the spinlock, then try once to get
 *   the lock.
 *
 *   This implementation is non-reentrant and takes one reader count on
 *   the lock.
 *
 *   Readers have priority over writers: while any reader holds the lock,
 *   new readers may still acquire it, but a writer cannot.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   false - Failure, the spinlock was already locked
 *   true  - Success, the spinlock was successfully locked
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

static inline_function bool read_trylock(FAR volatile rwlock_t *lock)
{
  while (true)
    {
      int old = atomic_read(lock);
      if (old <= RW_SP_WRITE_LOCKED)
        {
          DEBUGASSERT(old == RW_SP_WRITE_LOCKED);
          return false;
        }
      else if (atomic_cmpxchg(lock, &old, old + 1))
        {
          break;
        }
    }

  SP_DMB();
  return true;
}

/****************************************************************************
 * Name: read_unlock
 *
 * Description:
 *   Release one reader count on a non-reentrant read/write spinlock.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to unlock.
 *
 * Returned Value:
 *   None.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

static inline_function void read_unlock(FAR volatile rwlock_t *lock)
{
  DEBUGASSERT(atomic_read(lock) >= RW_SP_READ_LOCKED);

  SP_DMB();
  atomic_fetch_sub(lock, 1);
  SP_DSB();
  SP_SEV();
}

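/* Usage sketch (illustrative only; g_db_lock is a hypothetical rwlock_t):
 *
 *   static rwlock_t g_db_lock = RW_SP_UNLOCKED;
 *
 *   read_lock(&g_db_lock);
 *   ... read-only access; other readers may run concurrently ...
 *   read_unlock(&g_db_lock);
 */
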
/****************************************************************************
 * Name: write_lock
 *
 * Description:
 *   If this CPU does not already hold the spinlock, then loop until the
 *   spinlock is successfully locked.
 *
 *   This implementation is non-reentrant and sets the write-locked value
 *   on the lock, excluding both readers and other writers.
 *
 *   Readers have priority over writers: while any reader holds the lock,
 *   new readers may still acquire it, but a writer cannot.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   None. When the function returns, the spinlock was successfully locked
 *   by this CPU.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

static inline_function void write_lock(FAR volatile rwlock_t *lock)
{
  int zero = RW_SP_UNLOCKED;

  while (!atomic_cmpxchg(lock, &zero, RW_SP_WRITE_LOCKED))
    {
      SP_DSB();
      SP_WFE();
    }

  SP_DMB();
}

/****************************************************************************
 * Name: write_trylock
 *
 * Description:
 *   If this task does not already hold the spinlock, then try once to lock
 *   it. Do not wait if the spinlock is already locked.
 *
 *   This implementation is non-reentrant and sets the write-locked value
 *   on the lock, excluding both readers and other writers.
 *
 *   Readers have priority over writers: while any reader holds the lock,
 *   new readers may still acquire it, but a writer cannot.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   false - Failure, the spinlock was already locked
 *   true  - Success, the spinlock was successfully locked
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

static inline_function bool write_trylock(FAR volatile rwlock_t *lock)
{
  int zero = RW_SP_UNLOCKED;

  if (atomic_cmpxchg(lock, &zero, RW_SP_WRITE_LOCKED))
    {
      SP_DMB();
      return true;
    }

  SP_DSB();
  return false;
}

/****************************************************************************
 * Name: write_unlock
 *
 * Description:
 *   Release the write lock on a non-reentrant read/write spinlock.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to unlock.
 *
 * Returned Value:
 *   None.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

static inline_function void write_unlock(FAR volatile rwlock_t *lock)
{
  /* Ensure that this CPU already holds the write lock */

  DEBUGASSERT(atomic_read(lock) == RW_SP_WRITE_LOCKED);

  SP_DMB();
  atomic_set(lock, RW_SP_UNLOCKED);
  SP_DSB();
  SP_SEV();
}

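/* Usage sketch (illustrative only; g_db_lock is a hypothetical rwlock_t):
 *
 *   write_lock(&g_db_lock);
 *   ... exclusive access; no readers or other writers hold the lock ...
 *   write_unlock(&g_db_lock);
 */
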
/****************************************************************************
 * Name: read_lock_irqsave
 *
 * Description:
 *   If SMP is enabled:
 *     The lock argument must be specified. Disable local interrupts,
 *     take the read lock, and return the interrupt state.
 *
 *     NOTE: This API is a simple way to protect data (e.g. a H/W register
 *     or an internal data structure) in SMP mode. Do not use this API
 *     with kernel APIs which suspend the calling thread (e.g. nxsem_wait).
 *
 *   If SMP is not enabled:
 *     This function is equivalent to up_irq_save().
 *
 * Input Parameters:
 *   lock - Caller-specific spinlock. Must not be NULL.
 *
 * Returned Value:
 *   An opaque, architecture-specific value that represents the state of
 *   the interrupts prior to the call to read_lock_irqsave(lock);
 *
 ****************************************************************************/

#ifdef CONFIG_SPINLOCK
irqstate_t read_lock_irqsave(FAR rwlock_t *lock);
#else
#  define read_lock_irqsave(l) ((void)(l), up_irq_save())
#endif

/****************************************************************************
 * Name: read_unlock_irqrestore
 *
 * Description:
 *   If SMP is enabled:
 *     The lock argument must be specified. Release the lock and
 *     restore the interrupt state as it was prior to the previous call to
 *     read_lock_irqsave(lock).
 *
 *   If SMP is not enabled:
 *     This function is equivalent to up_irq_restore().
 *
 * Input Parameters:
 *   lock - Caller-specific spinlock. Must not be NULL.
 *
 *   flags - The architecture-specific value that represents the state of
 *           the interrupts prior to the call to read_lock_irqsave(lock);
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

#ifdef CONFIG_SPINLOCK
void read_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags);
#else
#  define read_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
#endif

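/* Typical pairing (illustrative only; g_db_lock is hypothetical):
 *
 *   irqstate_t flags;
 *
 *   flags = read_lock_irqsave(&g_db_lock);
 *   ... read shared state that is also touched from interrupt context ...
 *   read_unlock_irqrestore(&g_db_lock, flags);
 *
 * write_lock_irqsave()/write_unlock_irqrestore() below follow the same
 * pattern for exclusive access.
 */
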
/****************************************************************************
 * Name: write_lock_irqsave
 *
 * Description:
 *   If SMP is enabled:
 *     The lock argument must be specified. Disable local interrupts,
 *     take the write lock, and return the interrupt state.
 *
 *     NOTE: This API is a simple way to protect data (e.g. a H/W register
 *     or an internal data structure) in SMP mode. But do not use this API
 *     with kernel APIs which suspend the calling thread (e.g. nxsem_wait).
 *
 *   If SMP is not enabled:
 *     This function is equivalent to up_irq_save().
 *
 * Input Parameters:
 *   lock - Caller-specific spinlock. Must not be NULL.
 *
 * Returned Value:
 *   An opaque, architecture-specific value that represents the state of
 *   the interrupts prior to the call to write_lock_irqsave(lock);
 *
 ****************************************************************************/

#ifdef CONFIG_SPINLOCK
irqstate_t write_lock_irqsave(FAR rwlock_t *lock);
#else
#  define write_lock_irqsave(l) ((void)(l), up_irq_save())
#endif

/****************************************************************************
 * Name: write_unlock_irqrestore
 *
 * Description:
 *   If SMP is enabled:
 *     The lock argument must be specified. Release the lock and
 *     restore the interrupt state as it was prior to the previous call to
 *     write_lock_irqsave(lock).
 *
 *   If SMP is not enabled:
 *     This function is equivalent to up_irq_restore().
 *
 * Input Parameters:
 *   lock - Caller-specific spinlock. Must not be NULL.
 *
 *   flags - The architecture-specific value that represents the state of
 *           the interrupts prior to the call to write_lock_irqsave(lock);
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

#ifdef CONFIG_SPINLOCK
void write_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags);
#else
#  define write_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
#endif

#endif /* CONFIG_RW_SPINLOCK */

#undef EXTERN
#if defined(__cplusplus)
}
#endif

#endif /* __INCLUDE_NUTTX_SPINLOCK_H */