Commit 84538074 by Jerome Lambourg Committed by Arnaud Charlet

sigtramp-vxworks-target.inc: sigtramp-vxworks: force the stack alignment for x86_64.

2016-04-18  Jerome Lambourg  <lambourg@adacore.com>

	* sigtramp-vxworks-target.inc: sigtramp-vxworks: force the stack
	alignment for x86_64.
	* init.c: Better fix for guard page reset on x86_64-vx7.
	Do not try to retrieve the page that actually raised
	the signal, as the probing mechanism used on x86_64 does not allow
	such retrieval. We thus just test whether the guard page is active,
	and re-activate it if not.

From-SVN: r235102
parent 230ad369
2016-04-18 Jerome Lambourg <lambourg@adacore.com>
* sigtramp-vxworks-target.inc: sigtramp-vxworks: force the stack
alignment for x86_64.
* init.c: Better fix for guard page reset on x86_64-vx7.
Do not try to retrieve the page that actually raised
the signal, as the probing mechanism used on x86_64 does not allow
such retrieval. We thus just test whether the guard page is active,
and re-activate it if not.
2016-04-18 Arnaud Charlet <charlet@adacore.com> 2016-04-18 Arnaud Charlet <charlet@adacore.com>
* a-sytaco.adb (Suspension_Object): Aspect Default_Initial_Condition * a-sytaco.adb (Suspension_Object): Aspect Default_Initial_Condition
......
...@@ -1759,7 +1759,7 @@ getpid (void) ...@@ -1759,7 +1759,7 @@ getpid (void)
This function returns TRUE in case the guard page was hit by the This function returns TRUE in case the guard page was hit by the
signal. */ signal. */
static int static int
__gnat_reset_guard_page (int sig, void *sc) __gnat_reset_guard_page (int sig)
{ {
/* On ARM VxWorks 6.x and x86_64 VxWorks 7, the guard page is left un-armed /* On ARM VxWorks 6.x and x86_64 VxWorks 7, the guard page is left un-armed
by the kernel after being violated, so subsequent violations aren't by the kernel after being violated, so subsequent violations aren't
...@@ -1776,42 +1776,24 @@ __gnat_reset_guard_page (int sig, void *sc) ...@@ -1776,42 +1776,24 @@ __gnat_reset_guard_page (int sig, void *sc)
TASK_ID tid = taskIdSelf (); TASK_ID tid = taskIdSelf ();
WIND_TCB *pTcb = taskTcb (tid); WIND_TCB *pTcb = taskTcb (tid);
REG_SET *pregs = ((struct sigcontext *) sc)->sc_pregs;
VIRT_ADDR guardPage = (VIRT_ADDR) pTcb->pStackEnd - INT_OVERFLOW_SIZE; VIRT_ADDR guardPage = (VIRT_ADDR) pTcb->pStackEnd - INT_OVERFLOW_SIZE;
UINT stateMask = VM_STATE_MASK_VALID; UINT stateMask = VM_STATE_MASK_VALID;
UINT state = VM_STATE_VALID_NOT; UINT guardState = VM_STATE_VALID_NOT;
size_t probe_distance = 0;
VIRT_ADDR sigPage;
#if defined (ARMEL) #if (_WRS_VXWORKS_MAJOR >= 7)
/* violating address in rip: r12 */ stateMask |= MMU_ATTR_SPL_MSK;
sigPage = pregs->r[12] & ~(INT_OVERFLOW_SIZE - 1); guardState |= MMU_ATTR_NO_BLOCK;
#elif defined (__x86_64__)
/* violating address in rsp. */
probe_distance = 16 * 1024; /* in gcc/config/i386/vxworks7.h */
sigPage = pregs->rsp & ~(INT_OVERFLOW_SIZE - 1);
stateMask |= MMU_ATTR_SPL_MSK;
state |= MMU_ATTR_NO_BLOCK;
#else
#error "Not Implemented for this CPU"
#endif #endif
if (guardPage == (sigPage - probe_distance)) UINT nState;
vmStateGet (NULL, guardPage, &nState);
if ((nState & VM_STATE_MASK_VALID) != VM_STATE_VALID_NOT)
{ {
UINT nState; /* If the guard page has a valid state, we need to reset to
vmStateGet (NULL, guardPage, &nState); invalid state here */
if ((nState & VM_STATE_MASK_VALID) != VM_STATE_VALID_NOT) { vmStateSet (NULL, guardPage, INT_OVERFLOW_SIZE, stateMask, guardState);
/* If the guard page has a valid state, we need to reset to
invalid state here */
vmStateSet (NULL, guardPage, INT_OVERFLOW_SIZE, stateMask, state);
}
return TRUE; return TRUE;
} }
else
{
return FALSE;
}
#endif /* VXWORKS_FORCE_GUARD_PAGE */ #endif /* VXWORKS_FORCE_GUARD_PAGE */
return FALSE; return FALSE;
} }
...@@ -1919,7 +1901,7 @@ __gnat_map_signal (int sig, siginfo_t *si ATTRIBUTE_UNUSED, void *sc) ...@@ -1919,7 +1901,7 @@ __gnat_map_signal (int sig, siginfo_t *si ATTRIBUTE_UNUSED, void *sc)
msg = "unhandled signal"; msg = "unhandled signal";
} }
if (__gnat_reset_guard_page (sig, sc)) if (__gnat_reset_guard_page (sig))
{ {
/* Set the exception message: we know for sure that we have a /* Set the exception message: we know for sure that we have a
stack overflow here */ stack overflow here */
...@@ -1997,14 +1979,17 @@ __gnat_error_handler (int sig, siginfo_t *si, void *sc) ...@@ -1997,14 +1979,17 @@ __gnat_error_handler (int sig, siginfo_t *si, void *sc)
when they contain SPE instructions, we need to set it back before doing when they contain SPE instructions, we need to set it back before doing
anything else. anything else.
This mechanism is only need in kernel mode. */ This mechanism is only need in kernel mode. */
#if !(defined (__RTP__) || defined (CERT)) && ((CPU == PPCE500V2) || (CPU == PPC85XX)) #if !(defined (__RTP__) || defined (VTHREADS)) && ((CPU == PPCE500V2) || (CPU == PPC85XX))
register unsigned msr; register unsigned msr;
/* Read the MSR value */ /* Read the MSR value */
asm volatile ("mfmsr %0" : "=r" (msr)); asm volatile ("mfmsr %0" : "=r" (msr));
/* Force the SPE bit */ /* Force the SPE bit if not set. */
msr |= 0x02000000; if ((msr & 0x02000000) == 0)
/* Store to MSR */ {
asm volatile ("mtmsr %0" : : "r" (msr)); msr |= 0x02000000;
/* Store to MSR */
asm volatile ("mtmsr %0" : : "r" (msr));
}
#endif #endif
/* VxWorks will always mask out the signal during the signal handler and /* VxWorks will always mask out the signal during the signal handler and
......
...@@ -159,7 +159,7 @@ ...@@ -159,7 +159,7 @@
#define REGNO_R13 13 #define REGNO_R13 13
#define REGNO_R14 14 #define REGNO_R14 14
#define REGNO_R15 15 #define REGNO_R15 15
#define REGNO_SET_PC 16 /* aka %rip */ #define REGNO_RPC 16 /* aka %rip */
#define REGNO_EFLAGS 49 #define REGNO_EFLAGS 49
#define REGNO_FS 54 #define REGNO_FS 54
...@@ -401,8 +401,6 @@ TCR("ret") ...@@ -401,8 +401,6 @@ TCR("ret")
#define COMMON_CFI(REG) \ #define COMMON_CFI(REG) \
".cfi_offset " S(REGNO_##REG) "," S(REG_##REG) ".cfi_offset " S(REGNO_##REG) "," S(REG_##REG)
#define PC_CFI(REG) \
".cfi_offset " S(REGNO_##REG) "," S(REG_##REG)
#define CFI_COMMON_REGS \ #define CFI_COMMON_REGS \
CR("# CFI for common registers\n") \ CR("# CFI for common registers\n") \
...@@ -422,10 +420,8 @@ TCR(COMMON_CFI(RBX)) \ ...@@ -422,10 +420,8 @@ TCR(COMMON_CFI(RBX)) \
TCR(COMMON_CFI(RDX)) \ TCR(COMMON_CFI(RDX)) \
TCR(COMMON_CFI(RCX)) \ TCR(COMMON_CFI(RCX)) \
TCR(COMMON_CFI(RAX)) \ TCR(COMMON_CFI(RAX)) \
TCR(COMMON_CFI(EFLAGS)) \ TCR(COMMON_CFI(RPC)) \
TCR(COMMON_CFI(SET_PC)) \ TCR(".cfi_return_column " S(REGNO_RPC))
TCR(COMMON_CFI(FS)) \
TCR(".cfi_return_column " S(REGNO_SET_PC))
/* Trampoline body block /* Trampoline body block
--------------------- */ --------------------- */
...@@ -451,10 +447,17 @@ Not_implemented; ...@@ -451,10 +447,17 @@ Not_implemented;
/* Symbol definition block /* Symbol definition block
----------------------- */ ----------------------- */
#ifdef __x86_64__
#define FUNC_ALIGN TCR(".p2align 4,,15")
#else
#define FUNC_ALIGN
#endif
#define SIGTRAMP_START(SYM) \ #define SIGTRAMP_START(SYM) \
CR("# " S(SYM) " cfi trampoline") \ CR("# " S(SYM) " cfi trampoline") \
TCR(".type " S(SYM) ", "FUNCTION) \ TCR(".type " S(SYM) ", "FUNCTION) \
CR("") \ CR("") \
FUNC_ALIGN \
CR(S(SYM) ":") \ CR(S(SYM) ":") \
TCR(".cfi_startproc") \ TCR(".cfi_startproc") \
TCR(".cfi_signal_frame") TCR(".cfi_signal_frame")
...@@ -474,4 +477,3 @@ TCR(".size " S(SYM) ", .-" S(SYM)) ...@@ -474,4 +477,3 @@ TCR(".size " S(SYM) ", .-" S(SYM))
asm (".text\n" asm (".text\n"
TCR(".align 2")); TCR(".align 2"));
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment