diff --git a/include/nuttx/irq.h b/include/nuttx/irq.h
index 2f7dba3764..f7bfd75b3b 100644
--- a/include/nuttx/irq.h
+++ b/include/nuttx/irq.h
@@ -1,7 +1,7 @@
 /****************************************************************************
  * include/nuttx/irq.h
  *
- * Copyright (C) 2007-2011, 2013 Gregory Nutt. All rights reserved.
+ * Copyright (C) 2007-2011, 2013, 2016 Gregory Nutt. All rights reserved.
  * Author: Gregory Nutt <gnutt@nuttx.org>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -113,7 +113,7 @@ int irq_attach(int irq, xcpt_t isr);
  *
  ****************************************************************************/

-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION)
 irqstate_t enter_critical_section(void);
 #else
 #  define enter_critical_section(f) up_irq_save(f)
@@ -131,7 +131,7 @@ irqstate_t enter_critical_section(void);
  *
  ****************************************************************************/

-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION)
 void leave_critical_section(irqstate_t flags);
 #else
 #  define leave_critical_section(f) up_irq_restore(f)
@@ -144,4 +144,3 @@ void leave_critical_section(irqstate_t flags);
 #endif

 #endif /* __INCLUDE_NUTTX_IRQ_H */
-
diff --git a/sched/Kconfig b/sched/Kconfig
index 6dfbd2a954..8e228dc04e 100644
--- a/sched/Kconfig
+++ b/sched/Kconfig
@@ -627,7 +627,7 @@ config SCHED_INSTRUMENTATION_PREEMPTION
 config SCHED_INSTRUMENTATION_CSECTION
 	bool "Critical section monitor hooks"
 	default n
-	depends on SMP
+	depends on EXPERIMENTAL
 	---help---
 		Enables additional hooks for entry and exit from critical sections.
 		Interrupts are disabled while within a critical section. Board-
@@ -635,6 +635,13 @@ config SCHED_INSTRUMENTATION_CSECTION

 		void sched_note_csection(FAR struct tcb_s *tcb, bool state);

+		NOTE: This option is marked EXPERIMENTAL because there is a logical
+		error in the design. That error is that sched_note_get() calls
+		enter/leave_critical_section. When the note buffer has been
+		filled, each of these calls causes an entry to be removed from the
+		note buffer to make more space. The end result is that every other
+		note is lost when dumping the note buffer. Not very useful!
+
 config SCHED_INSTRUMENTATION_BUFFER
 	bool "Buffer instrumentation data in memory"
 	default n
diff --git a/sched/irq/Make.defs b/sched/irq/Make.defs
index 905720750e..9b5b264d66 100644
--- a/sched/irq/Make.defs
+++ b/sched/irq/Make.defs
@@ -37,6 +37,8 @@ CSRCS += irq_initialize.c irq_attach.c irq_dispatch.c irq_unexpectedisr.c

 ifeq ($(CONFIG_SMP),y)
 CSRCS += irq_csection.c
+else ifeq ($(CONFIG_SCHED_INSTRUMENTATION_CSECTION),y)
+CSRCS += irq_csection.c
 endif

 # Include irq build support
diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index 414238045e..573e193039 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -48,11 +48,13 @@
 #include "sched/sched.h"
 #include "irq/irq.h"

-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION)

 /****************************************************************************
  * Public Data
  ****************************************************************************/
+
+#ifdef CONFIG_SMP
 /* This is the spinlock that enforces critical sections when interrupts are
  * disabled.
  */
@@ -63,6 +65,7 @@ volatile spinlock_t g_cpu_irqlock = SP_UNLOCKED;

 volatile spinlock_t g_cpu_irqsetlock;
 volatile cpu_set_t g_cpu_irqset;
+#endif

 /****************************************************************************
  * Public Functions
  ****************************************************************************/
@@ -78,6 +81,7 @@ volatile cpu_set_t g_cpu_irqset;
  *
  ****************************************************************************/

+#ifdef CONFIG_SMP
 irqstate_t enter_critical_section(void)
 {
   FAR struct tcb_s *rtcb;
@@ -96,6 +100,8 @@ irqstate_t enter_critical_section(void)
       /* Do we already have interrupts disabled? */

       rtcb = this_task();
+      DEBUGASSERT(rtcb != NULL);
+
       if (rtcb->irqcount > 0)
         {
           /* Yes... make sure that the spinlock is set and increment the IRQ
@@ -136,6 +142,26 @@ irqstate_t enter_critical_section(void)

   return up_irq_save();
 }
+#else /* defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION) */
+irqstate_t enter_critical_section(void)
+{
+  /* Check if we were called from an interrupt handler */
+
+  if (!up_interrupt_context())
+    {
+      FAR struct tcb_s *rtcb = this_task();
+      DEBUGASSERT(rtcb != NULL);
+
+      /* No.. note that we have entered the critical section */
+
+      sched_note_csection(rtcb, true);
+    }
+
+  /* And disable interrupts */
+
+  return up_irq_save();
+}
+#endif

 /****************************************************************************
  * Name: leave_critical_section
@@ -146,6 +172,7 @@ irqstate_t enter_critical_section(void)
  *
  ****************************************************************************/

+#ifdef CONFIG_SMP
 void leave_critical_section(irqstate_t flags)
 {
   /* Do nothing if called from an interrupt handler */
@@ -153,8 +180,7 @@ void leave_critical_section(irqstate_t flags)
   if (!up_interrupt_context())
     {
       FAR struct tcb_s *rtcb = this_task();
-
-      DEBUGASSERT(rtcb->irqcount > 0);
+      DEBUGASSERT(rtcb != 0 && rtcb->irqcount > 0);

       /* Will we still have interrupts disabled after decrementing the
        * count?
@@ -211,5 +237,25 @@ void leave_critical_section(irqstate_t flags)
       up_irq_restore(flags);
     }
 }
+#else /* defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION) */
+void leave_critical_section(irqstate_t flags)
+{
+  /* Check if we were called from an interrupt handler */

-#endif /* CONFIG_SMP */
+  if (!up_interrupt_context())
+    {
+      FAR struct tcb_s *rtcb = this_task();
+      DEBUGASSERT(rtcb != NULL);
+
+      /* Note that we have left the critical section */
+
+      sched_note_csection(rtcb, false);
+    }
+
+  /* Restore the previous interrupt state. */
+
+  up_irq_restore(flags);
+}
+#endif
+
+#endif /* CONFIG_SMP || CONFIG_SCHED_INSTRUMENTATION_CSECTION */
diff --git a/sched/sched/sched_note.c b/sched/sched/sched_note.c
index ef25415dfe..ae414b096f 100644
--- a/sched/sched/sched_note.c
+++ b/sched/sched/sched_note.c
@@ -401,8 +401,6 @@ void sched_note_csection(FAR struct tcb_s *tcb, bool enter)
   note.ncs_count[1] = (uint8_t)((tcb->irqcount >> 8) & 0xff);
 #endif

-  note_systime((FAR struct note_common_s *)&note);
-
   /* Add the note to circular buffer */

   note_add((FAR const uint8_t *)&note, sizeof(struct note_csection_s));
@@ -450,7 +448,7 @@ ssize_t sched_note_get(FAR uint8_t *buffer, size_t buflen)

   /* Get the index to the tail of the circular buffer */

-  tail = g_note_info.ni_tail;
+  tail = g_note_info.ni_tail;
   DEBUGASSERT(tail < CONFIG_SCHED_NOTE_BUFSIZE);

   /* Get the length of the note at the tail index */
@@ -467,7 +465,7 @@ ssize_t sched_note_get(FAR uint8_t *buffer, size_t buflen)

       note_remove();

-      /* and return and error */
+      /* and return an error */

       notelen = -EFBIG;
       goto errout_with_csection;
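
Note on the EXPERIMENTAL marking in the Kconfig hunk above: the problem it describes can be illustrated with a small standalone simulation. This is not NuttX code and all names below are hypothetical; it is only a sketch of the interaction under the stated assumptions. A circular note buffer overwrites its oldest entry when full, so if draining the buffer itself passes through an instrumented enter/leave critical-section pair, every read injects fresh notes that evict entries which have not yet been read, and roughly every other original note is missing from the dump.

/* Standalone sketch (hypothetical names, not NuttX code) of the note-loss
 * behavior described in the Kconfig NOTE above.
 */

#include <stdio.h>

#define BUFSIZE 8           /* number of note slots in the circular buffer */

static int buffer[BUFSIZE]; /* each slot holds one note id */
static int head;            /* next slot to write */
static int tail;            /* next slot to read  */
static int count;           /* slots currently in use */

/* Add a note; when the buffer is full, overwrite the oldest entry. */

static void note_add(int id)
{
  if (count == BUFSIZE)
    {
      tail = (tail + 1) % BUFSIZE;   /* evict the oldest unread note */
      count--;
    }

  buffer[head] = id;
  head = (head + 1) % BUFSIZE;
  count++;
}

/* Read one note.  Mimics a reader that itself takes an instrumented
 * critical section: each call adds an "enter" and a "leave" note while
 * the buffer is being drained.
 */

static int note_get(int *out, int *next_id)
{
  note_add((*next_id)++);            /* "enter critical section" note */

  if (count == 0)
    {
      return 0;
    }

  *out = buffer[tail];
  tail = (tail + 1) % BUFSIZE;
  count--;

  note_add((*next_id)++);            /* "leave critical section" note */
  return 1;
}

int main(void)
{
  int id = 1;
  int note;
  int i;

  /* Fill the buffer with notes 1..BUFSIZE. */

  for (i = 0; i < BUFSIZE; i++)
    {
      note_add(id++);
    }

  /* Dump it.  Because every read injects two new notes into a full buffer,
   * original notes are evicted before they can be read: the output is
   * 2, 4, 6, 8, ... with the odd-numbered notes lost.
   */

  for (i = 0; i < BUFSIZE && note_get(&note, &id); i++)
    {
      printf("read note %d\n", note);
    }

  return 0;
}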