#if( portUSING_MPU_WRAPPERS == 1 )
struct xMEMORY_REGION;
void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings, const struct xMEMORY_REGION * const xRegions, StackType_t *pxBottomOfStack, uint32_t usStackDepth ) PRIVILEGED_FUNCTION;
+ void vPortReleaseTaskMPUSettings( xMPU_SETTINGS *xMPUSettings );
#endif
/* Multi-core: get current core ID */
#define portASSERT_IF_IN_ISR() vPortAssertIfInISR()
void vPortAssertIfInISR();
+
#define portCRITICAL_NESTING_IN_TCB 1
/*
#define PRIVILEGED_DATA
#endif
+
+void _xt_coproc_release(void * coproc_sa_base);
+
+
// porttrace
#if configUSE_TRACE_FACILITY_2
#include "porttrace.h"
*/
#endif
}
+
+void vPortReleaseTaskMPUSettings( xMPU_SETTINGS *xMPUSettings )
+{
+ /* If the task has live floating point registers somewhere, release them */
+ _xt_coproc_release( xMPUSettings->coproc_area );
+}
+
#endif
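
For context on where coproc_area comes from: the Xtensa port uses the MPU-wrapper hook purely to remember each task's co-processor save area, which is what vPortReleaseTaskMPUSettings later hands to _xt_coproc_release. A minimal sketch of that storage step, assuming a coproc_area field and an XT_CP_SIZE-byte area carved from the top of the task's stack (the struct contents, layout and alignment shown here are assumptions, not the verbatim port code):

/* Assumed shape: the port only needs somewhere to remember the save area. */
typedef struct {
    void *coproc_area;
} xMPU_SETTINGS;

void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings,
                                const struct xMEMORY_REGION * const xRegions,
                                StackType_t *pxBottomOfStack,
                                uint32_t usStackDepth )
{
    ( void ) xRegions;
    /* Reserve XT_CP_SIZE bytes at the top of the task stack, 16-byte aligned,
       as this task's co-processor state save area (assumed layout). */
    uint32_t ulTop = ( uint32_t ) ( pxBottomOfStack + usStackDepth - 1 );
    xMPUSettings->coproc_area = ( void * ) ( ( ulTop - XT_CP_SIZE ) & ~( ( uint32_t ) 0xf ) );
}
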
/*
{
pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) );
- ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
- --uxCurrentNumberOfTasks;
- --uxTasksDeleted;
+ /* We only want to kill tasks that ran on this core: e.g. _xt_coproc_release needs to
+ be called on the core the task is pinned to (if it is pinned at all). */
+ if( pxTCB->xCoreID == tskNO_AFFINITY || pxTCB->xCoreID == xPortGetCoreID()) {
+ ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
+ --uxCurrentNumberOfTasks;
+ --uxTasksDeleted;
+ } else {
+ /* Need to wait until the idle task on the other processor kills that task first. */
+ break;
+ }
}
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
}
#endif /* configUSE_NEWLIB_REENTRANT */
+ #if ( portUSING_MPU_WRAPPERS == 1 )
+ vPortReleaseTaskMPUSettings( &( pxTCB->xMPUSettings) );
+ #endif
+
#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
{
/* The task can only have been allocated dynamically - free both
the co-proc save area, to avoid the exception handler having to save the
thread's co-proc state before another thread can use it (optimization).
+Needs to be called on the processor the thread was running on. Unpinned threads
+won't have an entry here because they get pinned as soon as they use a coprocessor.
+
Entry Conditions:
A2 = Pointer to base of co-processor state save area.
addx4 a0, a5, a0 /* a0 = &_xt_coproc_mask[n] */
l32i a0, a0, 0 /* a0 = (n << 16) | (1 << n) */
- /* TODO: Remove this as soon as coprocessor state moving works across cores - JD */
/* FPU operations are incompatible with non-pinned tasks. If we see an FPU operation
here, it's better to pin the task to whatever core we're running on now, to keep
the whole thing from crashing. */
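/* In C terms this amounts to something like (illustration only, names as used
   in tasks.c): if( pxCurrentTCB[ core ]->xCoreID == tskNO_AFFINITY )
   { pxCurrentTCB[ core ]->xCoreID = xPortGetCoreID(); } */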
/* Grab correct xt_coproc_owner_sa for this core */
movi a2, XCHAL_CP_MAX << 2
- mull a2, a2, a3
+ mull a2, a2, a3 /* multiply by current processor id */
movi a3, _xt_coproc_owner_sa /* a3 = base of owner array */
- add a3, a3, a2
+ add a3, a3, a2 /* a3 = owner area needed for this processor */
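                                    /* Net effect: a3 = &_xt_coproc_owner_sa[ core_id * XCHAL_CP_MAX ];
                                       entries are 4 bytes each, hence the << 2 above. */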
extui a2, a0, 0, 16 /* coprocessor bitmask portion */
or a4, a4, a2 /* a4 = CPENABLE | (1 << n) */
wsr a4, CPENABLE
+/*
+Keep the load of _xt_coproc_owner_sa[n] atomic (i.e. load it once, then use that
+value everywhere): _xt_coproc_release relies on this so that it does not need
+any locking.
+*/
+
+
/* Get old coprocessor owner thread (save area ptr) and assign new one. */
addx4 a3, a5, a3 /* a3 = &_xt_coproc_owner_sa[n] */
l32i a2, a3, 0 /* a2 = old owner's save area */
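
To make the no-locking argument above concrete, the release side can be pictured in C roughly as follows; the extern declaration, the indexing and the loop are assumptions about what the assembly routine does, not a transcription of it. It clears every owner slot on this core that still points at the dying task's save area, so the single l32i of the owner entry above reads either the old owner or 0, never a torn value.

/* Assumed C-level equivalent of the assembly _xt_coproc_release routine. */
extern void *_xt_coproc_owner_sa[ portNUM_PROCESSORS * XCHAL_CP_MAX ];

void _xt_coproc_release( void *coproc_sa_base )
{
    /* Owner slots belonging to the calling core: one 4-byte entry per
       coprocessor, XCHAL_CP_MAX entries per core (assumed layout). */
    void **owner = &_xt_coproc_owner_sa[ xPortGetCoreID() * XCHAL_CP_MAX ];
    int n;

    for( n = 0; n < XCHAL_CP_MAX; n++ )
    {
        if( owner[ n ] == coproc_sa_base )
        {
            owner[ n ] = 0;   /* the dying task no longer owns coprocessor n */
        }
    }
}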