Commit cbf2f479 by Kaveh R. Ghazi, committed by Kaveh Ghazi

m68k.c (m68k_output_function_prologue, [...]): Delete versions for DPX2/MOTOROLA and NEWS/MOTOROLA.

	* m68k.c (m68k_output_function_prologue,
	m68k_output_function_epilogue): Delete versions for DPX2/MOTOROLA
	and NEWS/MOTOROLA.
	* genattrtab.c: Remove dpx2 comment.
	* libgcc2.c (__enable_execute_stack): Delete versions for
	NeXT/__MACH__, __convex__, __sysV88__, __pyr__ and
	sony_news/SYSTYPE_BSD.
	* longlong.h: Delete code for __a29k__, _AM29K, __clipper__,
	__gmicro__, __i860__, __NeXT__ and __pyr__.
	* rtl.h: Remove convex comment.
	* varasm.c: Likewise.

From-SVN: r56272
parent d3969c34
2002-08-13 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
* m68k.c (m68k_output_function_prologue,
m68k_output_function_epilogue): Delete versions for DPX2/MOTOROLA
and NEWS/MOTOROLA.
* genattrtab.c: Remove dpx2 comment.
* libgcc2.c (__enable_execute_stack): Delete versions for
NeXT/__MACH__, __convex__, __sysV88__, __pyr__ and
sony_news/SYSTYPE_BSD.
* longlong.h: Delete code for __a29k__, _AM29K, __clipper__,
__gmicro__, __i860__, __NeXT__ and __pyr__.
* rtl.h: Remove convex comment.
* varasm.c: Likewise.
2002-08-13 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
* c-opts.c (lang_flags): Const-ify.
* ra-build.c (undef_table): Likewise.
* ra.c (eliminables): Likewise.
......
...@@ -390,7 +390,6 @@ static void expand_units PARAMS ((void)); ...@@ -390,7 +390,6 @@ static void expand_units PARAMS ((void));
static rtx simplify_knowing PARAMS ((rtx, rtx)); static rtx simplify_knowing PARAMS ((rtx, rtx));
static rtx encode_units_mask PARAMS ((rtx)); static rtx encode_units_mask PARAMS ((rtx));
static void fill_attr PARAMS ((struct attr_desc *)); static void fill_attr PARAMS ((struct attr_desc *));
/* dpx2 compiler chokes if we specify the arg types of the args. */
static rtx substitute_address PARAMS ((rtx, rtx (*) (rtx), rtx (*) (rtx))); static rtx substitute_address PARAMS ((rtx, rtx (*) (rtx), rtx (*) (rtx)));
static void make_length_attrs PARAMS ((void)); static void make_length_attrs PARAMS ((void));
static rtx identity_fn PARAMS ((rtx)); static rtx identity_fn PARAMS ((rtx));
......
...@@ -1732,102 +1732,6 @@ mprotect (char *addr, int len, int prot) ...@@ -1732,102 +1732,6 @@ mprotect (char *addr, int len, int prot)
TRANSFER_FROM_TRAMPOLINE TRANSFER_FROM_TRAMPOLINE
#endif #endif
#if defined (NeXT) && defined (__MACH__)
/* Make stack executable so we can call trampolines on stack.
This is called from INITIALIZE_TRAMPOLINE in next.h. */
#ifdef NeXTStep21
#include <mach.h>
#else
#include <mach/mach.h>
#endif
/* Make the stack region holding a trampoline executable on NeXT/Mach.
   ADDR is the start of the trampoline; the TRAMPOLINE_SIZE bytes from
   there are granted VM_PROT_ALL via vm_protect, then the instruction
   cache covering [addr, eaddr) is invalidated so stale insns are not
   executed.  Exits with status 1 if vm_protect fails.  */
void
__enable_execute_stack (char *addr)
{
kern_return_t r;
char *eaddr = addr + TRAMPOLINE_SIZE;
vm_address_t a = (vm_address_t) addr;
/* turn on execute access on stack */
r = vm_protect (task_self (), a, TRAMPOLINE_SIZE, FALSE, VM_PROT_ALL);
if (r != KERN_SUCCESS)
{
/* Report the Mach error code and give up; trampolines cannot work
   without an executable stack.  */
mach_error("vm_protect VM_PROT_ALL", r);
exit(1);
}
/* We inline the i-cache invalidation for speed */
#ifdef CLEAR_INSN_CACHE
CLEAR_INSN_CACHE (addr, eaddr);
#else
__clear_cache ((int) addr, (int) eaddr);
#endif
}
#endif /* defined (NeXT) && defined (__MACH__) */
#ifdef __convex__
/* Make stack executable so we can call trampolines on stack.
This is called from INITIALIZE_TRAMPOLINE in convex.h. */
#include <sys/mman.h>
#include <sys/vmparam.h>
#include <machine/machparam.h>
/* Make the stack executable on Convex so trampolines can run there.
   LOWEST tracks the lowest stack address already remapped (the stack
   grows downward from USRSTACK); when the current frame address is
   below it, the gap [current, lowest) is remapped read/write/execute
   and LOWEST is lowered.  Finally the instruction cache is purged.  */
void
__enable_execute_stack (void)
{
/* Local whose address approximates the current stack pointer.  */
int fp;
static unsigned lowest = USRSTACK;
/* Round the frame address down to a page boundary (NBPG bytes).  */
unsigned current = (unsigned) &fp & -NBPG;
if (lowest > current)
{
unsigned len = lowest - current;
mremap (current, &len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE);
lowest = current;
}
/* Clear instruction cache in case an old trampoline is in it. */
asm ("pich");
}
#endif /* __convex__ */
#ifdef __sysV88__
/* Modified from the convex -code above. */
#include <sys/param.h>
#include <errno.h>
#include <sys/m88kbcs.h>
/* Make the stack executable on sysV88 (m88k).  Adapted from the Convex
   version above: LOWEST tracks the lowest page already marked
   executable; newly exposed stack pages are flagged as text via
   memctl(MCT_TEXT).  errno is saved and restored around the calls
   because memctl spuriously sets it for stack addresses.  */
void
__enable_execute_stack (void)
{
int save_errno;
static unsigned long lowest = USRSTACK;
/* Round the address of a local down to a page boundary (NBPC bytes).  */
unsigned long current = (unsigned long) &save_errno & -NBPC;
/* Ignore errno being set. memctl sets errno to EINVAL whenever the
address is seen as 'negative'. That is the case with the stack. */
save_errno=errno;
if (lowest > current)
{
unsigned len=lowest-current;
memctl(current,len,MCT_TEXT);
lowest = current;
}
else
/* Already covered: re-flag just the current page to flush caches.  */
memctl(current,NBPC,MCT_TEXT);
errno=save_errno;
}
#endif /* __sysV88__ */
#ifdef __sysV68__ #ifdef __sysV68__
#include <sys/signal.h> #include <sys/signal.h>
...@@ -1872,57 +1776,6 @@ __clear_insn_cache (void) ...@@ -1872,57 +1776,6 @@ __clear_insn_cache (void)
} }
#endif /* __sysV68__ */ #endif /* __sysV68__ */
#ifdef __pyr__
#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/vmmac.h>
/* Modified from the convex -code above.
mremap promises to clear the i-cache. */
/* Make the current stack page executable on Pyramid.  The page
   containing the local FP is rounded to a PAGSIZ boundary and
   mprotect'ed read/write/execute; per the note above, mremap/mprotect
   on this platform also clears the i-cache.  Aborts on failure.  */
void
__enable_execute_stack (void)
{
/* Local whose address identifies the current stack page.  */
int fp;
if (mprotect (((unsigned int)&fp/PAGSIZ)*PAGSIZ, PAGSIZ,
PROT_READ|PROT_WRITE|PROT_EXEC))
{
perror ("mprotect in __enable_execute_stack");
fflush (stderr);
abort ();
}
}
#endif /* __pyr__ */
#if defined (sony_news) && defined (SYSTYPE_BSD)
#include <stdio.h>
#include <sys/types.h>
#include <sys/param.h>
#include <syscall.h>
#include <machine/sysnews.h>
/* cacheflush function for NEWS-OS 4.2.
This function is called from trampoline-initialize code
defined in config/mips/mips.h. */
/* Flush SIZE bytes of cache starting at BEG on NEWS-OS 4.2 (Sony
   NEWS, MIPS) via the SYS_sysnews/NEWS_CACHEFLUSH syscall.  FLAG is
   accepted for interface compatibility but unused here; the call
   always flushes both caches (FLUSH_BCACHE).  Aborts on syscall
   failure.  Called from trampoline setup in config/mips/mips.h.  */
void
cacheflush (char *beg, int size, int flag)
{
if (syscall (SYS_sysnews, NEWS_CACHEFLUSH, beg, size, FLUSH_BCACHE))
{
perror ("cache_flush");
fflush (stderr);
abort ();
}
}
#endif /* sony_news */
#endif /* L_trampoline */ #endif /* L_trampoline */
#ifndef __CYGWIN__ #ifndef __CYGWIN__
......
...@@ -107,49 +107,6 @@ ...@@ -107,49 +107,6 @@
#define __AND_CLOBBER_CC , "cc" #define __AND_CLOBBER_CC , "cc"
#endif /* __GNUC__ < 2 */ #endif /* __GNUC__ < 2 */
#if (defined (__a29k__) || defined (_AM29K)) && W_TYPE_SIZE == 32
/* AMD Am29000 family: longlong.h arithmetic primitives implemented
   with inline asm.  */
/* Two-word add: (sh:sl) = (ah:al) + (bh:bl), carry propagated by addc.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("add %1,%4,%5\n\taddc %0,%2,%3" \
: "=r" ((USItype) (sh)), \
"=&r" ((USItype) (sl)) \
: "%r" ((USItype) (ah)), \
"rI" ((USItype) (bh)), \
"%r" ((USItype) (al)), \
"rI" ((USItype) (bl)))
/* Two-word subtract: (sh:sl) = (ah:al) - (bh:bl), borrow via subc.  */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
__asm__ ("sub %1,%4,%5\n\tsubc %0,%2,%3" \
: "=r" ((USItype) (sh)), \
"=&r" ((USItype) (sl)) \
: "r" ((USItype) (ah)), \
"rI" ((USItype) (bh)), \
"r" ((USItype) (al)), \
"rI" ((USItype) (bl)))
/* 32x32 -> 64 unsigned multiply: multiplu gives the low word,
   multmu the high word.  */
#define umul_ppmm(xh, xl, m0, m1) \
do { \
USItype __m0 = (m0), __m1 = (m1); \
__asm__ ("multiplu %0,%1,%2" \
: "=r" ((USItype) (xl)) \
: "r" (__m0), \
"r" (__m1)); \
__asm__ ("multmu %0,%1,%2" \
: "=r" ((USItype) (xh)) \
: "r" (__m0), \
"r" (__m1)); \
} while (0)
/* 64/32 divide: quotient in Q, remainder in R, dividend (n1:n0).  */
#define udiv_qrnnd(q, r, n1, n0, d) \
__asm__ ("dividu %0,%3,%4" \
: "=r" ((USItype) (q)), \
"=q" ((USItype) (r)) \
: "1" ((USItype) (n1)), \
"r" ((USItype) (n0)), \
"r" ((USItype) (d)))
/* Count leading zeros with the clz instruction.  */
#define count_leading_zeros(count, x) \
__asm__ ("clz %0,%1" \
: "=r" ((USItype) (count)) \
: "r" ((USItype) (x)))
/* clz of zero is well defined on this CPU and yields 32.  */
#define COUNT_LEADING_ZEROS_0 32
#endif /* __a29k__ */
#if defined (__alpha) && W_TYPE_SIZE == 64 #if defined (__alpha) && W_TYPE_SIZE == 64
#define umul_ppmm(ph, pl, m0, m1) \ #define umul_ppmm(ph, pl, m0, m1) \
do { \ do { \
...@@ -275,71 +232,6 @@ UDItype __umulsidi3 (USItype, USItype); ...@@ -275,71 +232,6 @@ UDItype __umulsidi3 (USItype, USItype);
#define UDIV_TIME 100 #define UDIV_TIME 100
#endif /* __arm__ */ #endif /* __arm__ */
#if defined (__clipper__) && W_TYPE_SIZE == 32
/* Intergraph Clipper: widening multiplies via a union that splits the
   64-bit product into low/high 32-bit halves.  */
/* Unsigned 32x32 -> 64 multiply; high word to W1, low word to W0.  */
#define umul_ppmm(w1, w0, u, v) \
({union {UDItype __ll; \
struct {USItype __l, __h;} __i; \
} __xx; \
__asm__ ("mulwux %2,%0" \
: "=r" (__xx.__ll) \
: "%0" ((USItype) (u)), \
"r" ((USItype) (v))); \
(w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
/* Signed 32x32 -> 64 multiply, same splitting scheme.  */
#define smul_ppmm(w1, w0, u, v) \
({union {DItype __ll; \
struct {SItype __l, __h;} __i; \
} __xx; \
__asm__ ("mulwx %2,%0" \
: "=r" (__xx.__ll) \
: "%0" ((SItype) (u)), \
"r" ((SItype) (v))); \
(w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
/* Unsigned widening multiply returning the full 64-bit product.  */
#define __umulsidi3(u, v) \
({UDItype __w; \
__asm__ ("mulwux %2,%0" \
: "=r" (__w) \
: "%0" ((USItype) (u)), \
"r" ((USItype) (v))); \
__w; })
#endif /* __clipper__ */
#if defined (__gmicro__) && W_TYPE_SIZE == 32
/* Gmicro (TRON): longlong.h primitives via inline asm.  */
/* Two-word add: add.w sets the carry consumed by addx.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("add.w %5,%1\n\taddx %3,%0" \
: "=g" ((USItype) (sh)), \
"=&g" ((USItype) (sl)) \
: "%0" ((USItype) (ah)), \
"g" ((USItype) (bh)), \
"%1" ((USItype) (al)), \
"g" ((USItype) (bl)))
/* Two-word subtract: sub.w sets the borrow consumed by subx.  */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
__asm__ ("sub.w %5,%1\n\tsubx %3,%0" \
: "=g" ((USItype) (sh)), \
"=&g" ((USItype) (sl)) \
: "0" ((USItype) (ah)), \
"g" ((USItype) (bh)), \
"1" ((USItype) (al)), \
"g" ((USItype) (bl)))
/* 32x32 -> 64 unsigned multiply via mulx.  */
#define umul_ppmm(ph, pl, m0, m1) \
__asm__ ("mulx %3,%0,%1" \
: "=g" ((USItype) (ph)), \
"=r" ((USItype) (pl)) \
: "%0" ((USItype) (m0)), \
"g" ((USItype) (m1)))
/* 64/32 divide via divx: quotient in Q, remainder in R.  */
#define udiv_qrnnd(q, r, nh, nl, d) \
__asm__ ("divx %4,%0,%1" \
: "=g" ((USItype) (q)), \
"=r" ((USItype) (r)) \
: "1" ((USItype) (nh)), \
"0" ((USItype) (nl)), \
"g" ((USItype) (d)))
/* Count leading zeros with the bit-search instruction bsch/1.  */
#define count_leading_zeros(count, x) \
__asm__ ("bsch/1 %1,%0" \
: "=g" (count) \
: "g" ((USItype) (x)), \
"0" ((USItype) 0))
#endif
#if defined (__hppa) && W_TYPE_SIZE == 32 #if defined (__hppa) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("add %4,%5,%1\n\taddc %2,%3,%0" \ __asm__ ("add %4,%5,%1\n\taddc %2,%3,%0" \
...@@ -484,47 +376,6 @@ UDItype __umulsidi3 (USItype, USItype); ...@@ -484,47 +376,6 @@ UDItype __umulsidi3 (USItype, USItype);
#define UDIV_TIME 40 #define UDIV_TIME 40
#endif /* 80x86 */ #endif /* 80x86 */
#if defined (__i860__) && W_TYPE_SIZE == 32
/* Intel i860: experimental two-word add/sub via the FP unit's
   double-word integer add/sub (fiadd.dd/fisub.dd).  The whole block
   is disabled (#if 0) pending evidence it actually improves code.  */
#if 0
/* Make sure these patterns really improve the code before
switching them on. */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
do { \
union \
{ \
DItype __ll; \
struct {USItype __l, __h;} __i; \
} __a, __b, __s; \
__a.__i.__l = (al); \
__a.__i.__h = (ah); \
__b.__i.__l = (bl); \
__b.__i.__h = (bh); \
__asm__ ("fiadd.dd %1,%2,%0" \
: "=f" (__s.__ll) \
: "%f" (__a.__ll), "f" (__b.__ll)); \
(sh) = __s.__i.__h; \
(sl) = __s.__i.__l; \
} while (0)
/* Two-word subtract, same union packing as above.  */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
do { \
union \
{ \
DItype __ll; \
struct {USItype __l, __h;} __i; \
} __a, __b, __s; \
__a.__i.__l = (al); \
__a.__i.__h = (ah); \
__b.__i.__l = (bl); \
__b.__i.__h = (bh); \
__asm__ ("fisub.dd %1,%2,%0" \
: "=f" (__s.__ll) \
: "%f" (__a.__ll), "f" (__b.__ll)); \
(sh) = __s.__i.__h; \
(sl) = __s.__i.__l; \
} while (0)
#endif
#endif /* __i860__ */
#if defined (__i960__) && W_TYPE_SIZE == 32 #if defined (__i960__) && W_TYPE_SIZE == 32
#define umul_ppmm(w1, w0, u, v) \ #define umul_ppmm(w1, w0, u, v) \
({union {UDItype __ll; \ ({union {UDItype __ll; \
...@@ -589,8 +440,7 @@ UDItype __umulsidi3 (USItype, USItype); ...@@ -589,8 +440,7 @@ UDItype __umulsidi3 (USItype, USItype);
#if defined (__mc68020__) || defined(mc68020) \ #if defined (__mc68020__) || defined(mc68020) \
|| defined(__mc68030__) || defined(mc68030) \ || defined(__mc68030__) || defined(mc68030) \
|| defined(__mc68040__) || defined(mc68040) \ || defined(__mc68040__) || defined(mc68040) \
|| defined(__mcpu32__) || defined(mcpu32) \ || defined(__mcpu32__) || defined(mcpu32)
|| defined(__NeXT__)
#define umul_ppmm(w1, w0, u, v) \ #define umul_ppmm(w1, w0, u, v) \
__asm__ ("mulu%.l %3,%1:%0" \ __asm__ ("mulu%.l %3,%1:%0" \
: "=d" ((USItype) (w0)), \ : "=d" ((USItype) (w0)), \
...@@ -658,8 +508,7 @@ UDItype __umulsidi3 (USItype, USItype); ...@@ -658,8 +508,7 @@ UDItype __umulsidi3 (USItype, USItype);
#if defined (__mc68020__) || defined(mc68020) \ #if defined (__mc68020__) || defined(mc68020) \
|| defined(__mc68030__) || defined(mc68030) \ || defined(__mc68030__) || defined(mc68030) \
|| defined(__mc68040__) || defined(mc68040) \ || defined(__mc68040__) || defined(mc68040) \
|| defined(__mc68060__) || defined(mc68060) \ || defined(__mc68060__) || defined(mc68060)
|| defined(__NeXT__)
#define count_leading_zeros(count, x) \ #define count_leading_zeros(count, x) \
__asm__ ("bfffo %1{%b2:%b2},%0" \ __asm__ ("bfffo %1{%b2:%b2},%0" \
: "=d" ((USItype) (count)) \ : "=d" ((USItype) (count)) \
...@@ -894,35 +743,6 @@ UDItype __umulsidi3 (USItype, USItype); ...@@ -894,35 +743,6 @@ UDItype __umulsidi3 (USItype, USItype);
#endif #endif
#endif /* Power architecture variants. */ #endif /* Power architecture variants. */
#if defined (__pyr__) && W_TYPE_SIZE == 32
/* Pyramid: longlong.h primitives via inline asm.  */
/* Two-word add: addw sets the carry consumed by addwc.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("addw %5,%1\n\taddwc %3,%0" \
: "=r" ((USItype) (sh)), \
"=&r" ((USItype) (sl)) \
: "%0" ((USItype) (ah)), \
"g" ((USItype) (bh)), \
"%1" ((USItype) (al)), \
"g" ((USItype) (bl)))
/* Two-word subtract: subw sets the borrow consumed by subwb.  */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
__asm__ ("subw %5,%1\n\tsubwb %3,%0" \
: "=r" ((USItype) (sh)), \
"=&r" ((USItype) (sl)) \
: "0" ((USItype) (ah)), \
"g" ((USItype) (bh)), \
"1" ((USItype) (al)), \
"g" ((USItype) (bl)))
/* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP. */
/* Unsigned 32x32 -> 64 multiply via uemul; note __h precedes __l in
   the union, matching this machine's register-pair layout.  */
#define umul_ppmm(w1, w0, u, v) \
({union {UDItype __ll; \
struct {USItype __h, __l;} __i; \
} __xx; \
__asm__ ("movw %1,%R0\n\tuemul %2,%0" \
: "=&r" (__xx.__ll) \
: "g" ((USItype) (u)), \
"g" ((USItype) (v))); \
(w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
#endif /* __pyr__ */
#if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32 #if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("a %1,%5\n\tae %0,%3" \ __asm__ ("a %1,%5\n\tae %0,%3" \
......
...@@ -1213,7 +1213,6 @@ do { \ ...@@ -1213,7 +1213,6 @@ do { \
/* Define a macro to look for REG_INC notes, /* Define a macro to look for REG_INC notes,
but save time on machines where they never exist. */ but save time on machines where they never exist. */
/* Don't continue this line--convex cc version 4.1 would lose. */
#if (defined (HAVE_PRE_INCREMENT) || defined (HAVE_PRE_DECREMENT) || defined (HAVE_POST_INCREMENT) || defined (HAVE_POST_DECREMENT)) #if (defined (HAVE_PRE_INCREMENT) || defined (HAVE_PRE_DECREMENT) || defined (HAVE_POST_INCREMENT) || defined (HAVE_POST_DECREMENT))
#define FIND_REG_INC_NOTE(INSN, REG) \ #define FIND_REG_INC_NOTE(INSN, REG) \
((REG) != NULL_RTX && REG_P ((REG)) \ ((REG) != NULL_RTX && REG_P ((REG)) \
...@@ -1226,7 +1225,6 @@ do { \ ...@@ -1226,7 +1225,6 @@ do { \
/* Indicate whether the machine has any sort of auto increment addressing. /* Indicate whether the machine has any sort of auto increment addressing.
If not, we can avoid checking for REG_INC notes. */ If not, we can avoid checking for REG_INC notes. */
/* Don't continue this line--convex cc version 4.1 would lose. */
#if (defined (HAVE_PRE_INCREMENT) || defined (HAVE_PRE_DECREMENT) || defined (HAVE_POST_INCREMENT) || defined (HAVE_POST_DECREMENT)) #if (defined (HAVE_PRE_INCREMENT) || defined (HAVE_PRE_DECREMENT) || defined (HAVE_POST_INCREMENT) || defined (HAVE_POST_DECREMENT))
#define AUTO_INC_DEC #define AUTO_INC_DEC
#endif #endif
......
...@@ -1614,7 +1614,6 @@ assemble_variable (decl, top_level, at_end, dont_output_data) ...@@ -1614,7 +1614,6 @@ assemble_variable (decl, top_level, at_end, dont_output_data)
rounded = (rounded / (BIGGEST_ALIGNMENT / BITS_PER_UNIT) rounded = (rounded / (BIGGEST_ALIGNMENT / BITS_PER_UNIT)
* (BIGGEST_ALIGNMENT / BITS_PER_UNIT)); * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
/* Don't continue this line--convex cc version 4.1 would lose. */
#if !defined(ASM_OUTPUT_ALIGNED_COMMON) && !defined(ASM_OUTPUT_ALIGNED_DECL_COMMON) && !defined(ASM_OUTPUT_ALIGNED_BSS) #if !defined(ASM_OUTPUT_ALIGNED_COMMON) && !defined(ASM_OUTPUT_ALIGNED_DECL_COMMON) && !defined(ASM_OUTPUT_ALIGNED_BSS)
if ((unsigned HOST_WIDE_INT) DECL_ALIGN (decl) / BITS_PER_UNIT > rounded) if ((unsigned HOST_WIDE_INT) DECL_ALIGN (decl) / BITS_PER_UNIT > rounded)
warning_with_decl warning_with_decl
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment